From ba872b9b45bd788fefe7eb3286d669ed50495872 Mon Sep 17 00:00:00 2001
From: Arkadiy Kukarkin
Date: Wed, 4 Feb 2026 12:08:45 +0100
Subject: [PATCH 1/3] bump go-swagger to v0.33.1

---
 .github/workflows/devcontainer-podman.yml | 2 +-
 singularity.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/devcontainer-podman.yml b/.github/workflows/devcontainer-podman.yml
index 00dafdd2..5e9584b9 100644
--- a/.github/workflows/devcontainer-podman.yml
+++ b/.github/workflows/devcontainer-podman.yml
@@ -42,7 +42,7 @@ jobs:
         uses: parkan/github-actions/devcontainer-exec@v2
         with:
           container-id: ${{ steps.build.outputs.container-id }}
-          command: 'cd /workspaces/singularity && mkdir -p client/swagger/client && go install github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5 && go generate ./client/swagger/...'
+          command: 'cd /workspaces/singularity && go generate ./...'
           container-runtime: podman
 
       - name: Check formatting

diff --git a/singularity.go b/singularity.go
index 62a71e90..7ccf5bb7 100644
--- a/singularity.go
+++ b/singularity.go
@@ -16,7 +16,7 @@ import (
 //go:generate rm -rf ./docs/en/web-api-reference
 //go:generate go run docs/gen/webapireference/main.go
 //go:generate rm -rf ./client
-//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate client -f ./docs/swagger/swagger.json -t . -c client/swagger/http -m client/swagger/models -a client/swagger/operations -q
+//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.33.1 generate client -f ./docs/swagger/swagger.json -t . -c client/swagger/http -m client/swagger/models -a client/swagger/operations -q
 
 //go:embed version.json
 var versionJSON []byte

From 173380824cf1795a8c0b87b2482aa3c3e3083ac4 Mon Sep 17 00:00:00 2001
From: Arkadiy Kukarkin
Date: Wed, 4 Feb 2026 12:20:54 +0100
Subject: [PATCH 2/3] remove toolchain directive from go.mod

---
 go.mod | 1 -
 1 file changed, 1 deletion(-)

diff --git a/go.mod b/go.mod
index a95b29ec..fc49ae75 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,6 @@ module github.com/data-preservation-programs/singularity
 
 go 1.24.6
 
-toolchain go1.24.9
 
 require (
 	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b

From bf6adcb7f1039d4f2c0be8e6f8fbf5c47d85a3d8 Mon Sep 17 00:00:00 2001
From: Arkadiy Kukarkin
Date: Wed, 4 Feb 2026 12:29:47 +0100
Subject: [PATCH 3/3] regenerate swagger client with go-swagger v0.33.1

---
 client/swagger/http/admin/admin_client.go | 13 +-
 .../http/admin/set_identity_responses.go | 7 +-
 client/swagger/http/deal/deal_client.go | 26 +-
 .../swagger/http/deal/list_deals_responses.go | 9 +-
 .../http/deal/send_manual_responses.go | 9 +-
 .../create_schedule_responses.go | 9 +-
 .../deal_schedule/deal_schedule_client.go | 91 +-
 .../list_preparation_schedules_responses.go | 9 +-
 .../deal_schedule/list_schedules_responses.go | 9 +-
 .../deal_schedule/pause_schedule_responses.go | 9 +-
 .../remove_schedule_responses.go | 7 +-
 .../resume_schedule_responses.go | 9 +-
 .../update_schedule_responses.go | 9 +-
 client/swagger/http/file/file_client.go | 62 +-
 .../http/file/get_file_deals_responses.go | 7 +-
 .../swagger/http/file/get_file_responses.go | 7 +-
 .../file/prepare_to_pack_file_responses.go | 9 +-
 .../swagger/http/file/push_file_responses.go | 9 +-
 .../http/file/retrieve_file_responses.go | 13 +-
 client/swagger/http/job/job_client.go | 104 +-
 client/swagger/http/job/pack_responses.go | 9 +-
 .../http/job/pause_dag_gen_responses.go | 9 +-
 .../swagger/http/job/pause_pack_responses.go | 9 +-
 .../swagger/http/job/pause_scan_responses.go | 9
+- .../job/prepare_to_pack_source_responses.go | 7 +- .../http/job/start_dag_gen_responses.go | 9 +- .../swagger/http/job/start_pack_responses.go | 9 +- .../swagger/http/job/start_scan_responses.go | 9 +- .../swagger/http/piece/add_piece_responses.go | 9 +- .../http/piece/delete_piece_parameters.go | 197 + .../http/piece/delete_piece_responses.go | 321 + .../piece/get_piece_id_metadata_responses.go | 11 +- .../http/piece/list_pieces_responses.go | 9 +- client/swagger/http/piece/piece_client.go | 88 +- .../add_output_storage_responses.go | 9 +- .../add_source_storage_responses.go | 9 +- .../create_preparation_responses.go | 9 +- .../explore_preparation_responses.go | 9 +- .../get_preparation_status_responses.go | 9 +- .../list_preparations_responses.go | 9 +- .../http/preparation/preparation_client.go | 117 +- .../remove_output_storage_responses.go | 9 +- .../remove_preparation_responses.go | 7 +- .../rename_preparation_responses.go | 9 +- client/swagger/http/singularity_api_client.go | 2 +- .../storage/create_acd_storage_parameters.go | 153 - .../storage/create_acd_storage_responses.go | 258 - .../create_azureblob_storage_responses.go | 9 +- .../storage/create_b2_storage_responses.go | 9 +- .../storage/create_box_storage_responses.go | 9 +- .../storage/create_drive_storage_responses.go | 9 +- .../create_dropbox_storage_responses.go | 9 +- .../create_fichier_storage_responses.go | 9 +- .../create_filefabric_storage_responses.go | 9 +- .../storage/create_ftp_storage_responses.go | 9 +- .../storage/create_gcs_storage_responses.go | 9 +- .../create_gphotos_storage_responses.go | 9 +- .../storage/create_hdfs_storage_responses.go | 9 +- .../create_hidrive_storage_responses.go | 9 +- .../storage/create_http_storage_responses.go | 9 +- ...reate_internetarchive_storage_responses.go | 9 +- .../create_jottacloud_storage_responses.go | 9 +- ...ate_koofr_digistorage_storage_responses.go | 9 +- .../create_koofr_koofr_storage_responses.go | 9 +- .../create_koofr_other_storage_responses.go | 9 +- .../storage/create_local_storage_responses.go | 9 +- .../create_mailru_storage_responses.go | 9 +- .../storage/create_mega_storage_responses.go | 9 +- .../create_netstorage_storage_responses.go | 9 +- .../create_onedrive_storage_responses.go | 9 +- .../create_oos_env_auth_storage_responses.go | 9 +- ...stance_principal_auth_storage_responses.go | 9 +- .../create_oos_no_auth_storage_responses.go | 9 +- ...source_principal_auth_storage_responses.go | 9 +- ...s_user_principal_auth_storage_responses.go | 9 +- ...rkload_identity_auth_storage_parameters.go | 153 + ...orkload_identity_auth_storage_responses.go | 259 + .../create_opendrive_storage_responses.go | 9 +- .../create_pcloud_storage_responses.go | 9 +- .../create_premiumizeme_storage_responses.go | 9 +- .../storage/create_putio_storage_responses.go | 9 +- .../create_qingstor_storage_responses.go | 9 +- .../create_s3_a_w_s_storage_responses.go | 9 +- .../create_s3_alibaba_storage_responses.go | 9 +- ...create_s3_arvan_cloud_storage_responses.go | 9 +- .../create_s3_ceph_storage_responses.go | 9 +- ...reate_s3_china_mobile_storage_responses.go | 9 +- .../create_s3_cloudflare_storage_responses.go | 9 +- ...eate_s3_digital_ocean_storage_responses.go | 9 +- .../create_s3_dreamhost_storage_responses.go | 9 +- .../create_s3_g_c_s_storage_parameters.go | 153 + .../create_s3_g_c_s_storage_responses.go | 259 + ...reate_s3_huawei_o_b_s_storage_responses.go | 9 +- ...create_s3_i_b_m_c_o_s_storage_responses.go | 9 +- .../create_s3_i_drive_storage_responses.go | 9 +- 
.../create_s3_i_o_n_o_s_storage_responses.go | 9 +- .../create_s3_leviia_storage_parameters.go | 153 + .../create_s3_leviia_storage_responses.go | 259 + .../create_s3_liara_storage_responses.go | 9 +- .../create_s3_linode_storage_parameters.go | 153 + .../create_s3_linode_storage_responses.go | 259 + .../create_s3_lyve_cloud_storage_responses.go | 9 +- .../create_s3_magalu_storage_parameters.go | 153 + .../create_s3_magalu_storage_responses.go | 259 + .../create_s3_minio_storage_responses.go | 9 +- .../create_s3_netease_storage_responses.go | 9 +- .../create_s3_other_storage_responses.go | 9 +- .../create_s3_petabox_storage_parameters.go | 153 + .../create_s3_petabox_storage_responses.go | 259 + .../create_s3_qiniu_storage_responses.go | 9 +- .../create_s3_rack_corp_storage_responses.go | 9 +- .../create_s3_rclone_storage_parameters.go | 153 + .../create_s3_rclone_storage_responses.go | 259 + .../create_s3_scaleway_storage_responses.go | 9 +- ...create_s3_seaweed_f_s_storage_responses.go | 9 +- .../create_s3_stack_path_storage_responses.go | 9 +- .../create_s3_storj_storage_responses.go | 9 +- .../create_s3_synology_storage_parameters.go | 153 + .../create_s3_synology_storage_responses.go | 259 + ...eate_s3_tencent_c_o_s_storage_responses.go | 9 +- .../create_s3_wasabi_storage_responses.go | 9 +- .../create_seafile_storage_responses.go | 9 +- .../storage/create_sftp_storage_responses.go | 9 +- .../create_sharefile_storage_responses.go | 9 +- .../storage/create_sia_storage_responses.go | 9 +- .../storage/create_smb_storage_responses.go | 9 +- ...create_storj_existing_storage_responses.go | 9 +- .../create_storj_new_storage_responses.go | 9 +- .../create_sugarsync_storage_responses.go | 9 +- .../storage/create_swift_storage_responses.go | 9 +- .../storage/create_union_storage_responses.go | 9 +- .../create_uptobox_storage_responses.go | 9 +- .../create_webdav_storage_responses.go | 9 +- .../create_yandex_storage_responses.go | 9 +- .../storage/create_zoho_storage_responses.go | 9 +- .../http/storage/explore_storage_responses.go | 9 +- .../http/storage/list_storages_responses.go | 9 +- .../http/storage/remove_storage_responses.go | 7 +- .../http/storage/rename_storage_responses.go | 9 +- client/swagger/http/storage/storage_client.go | 1433 ++- .../http/storage/update_storage_responses.go | 9 +- .../http/wallet/import_wallet_responses.go | 9 +- .../http/wallet/list_wallets_responses.go | 9 +- .../http/wallet/remove_wallet_responses.go | 7 +- client/swagger/http/wallet/wallet_client.go | 39 +- .../attach_wallet_responses.go | 9 +- .../detach_wallet_responses.go | 9 +- .../list_attached_wallets_responses.go | 9 +- .../wallet_association_client.go | 39 +- .../models/dataprep_delete_piece_request.go | 53 + client/swagger/models/dataprep_dir_entry.go | 17 +- .../swagger/models/dataprep_explore_result.go | 17 +- client/swagger/models/dataprep_piece_list.go | 33 +- .../swagger/models/deal_list_deal_request.go | 17 +- .../models/file_deals_for_file_range.go | 33 +- client/swagger/models/job_source_status.go | 49 +- client/swagger/models/model_car.go | 5 +- client/swagger/models/model_deal.go | 17 +- client/swagger/models/model_deal_state.go | 2 +- client/swagger/models/model_file.go | 19 +- client/swagger/models/model_job.go | 35 +- client/swagger/models/model_job_state.go | 2 +- client/swagger/models/model_job_type.go | 2 +- client/swagger/models/model_preparation.go | 33 +- client/swagger/models/model_schedule.go | 33 +- client/swagger/models/model_schedule_state.go | 2 +- 
client/swagger/models/model_storage.go | 33 +- client/swagger/models/storage_acd_config.go | 74 - .../models/storage_azureblob_config.go | 15 +- client/swagger/models/storage_b2_config.go | 15 +- client/swagger/models/storage_box_config.go | 6 + ..._workload_identity_auth_storage_request.go | 117 + ...storage_create_s3_g_c_s_storage_request.go | 117 + ...torage_create_s3_leviia_storage_request.go | 117 + ...torage_create_s3_linode_storage_request.go | 117 + ...torage_create_s3_magalu_storage_request.go | 117 + ...rage_create_s3_petabox_storage_request.go} | 30 +- ...torage_create_s3_rclone_storage_request.go | 117 + ...rage_create_s3_synology_storage_request.go | 117 + client/swagger/models/storage_drive_config.go | 31 +- .../swagger/models/storage_dropbox_config.go | 9 + .../swagger/models/storage_fichier_config.go | 6 + .../models/storage_filefabric_config.go | 3 + client/swagger/models/storage_ftp_config.go | 6 + client/swagger/models/storage_gcs_config.go | 9 + .../swagger/models/storage_gphotos_config.go | 15 + client/swagger/models/storage_hdfs_config.go | 5 +- .../swagger/models/storage_hidrive_config.go | 3 + client/swagger/models/storage_http_config.go | 6 + .../models/storage_internetarchive_config.go | 3 + .../models/storage_jottacloud_config.go | 18 + .../storage_koofr_digistorage_config.go | 5 +- .../models/storage_koofr_koofr_config.go | 5 +- .../models/storage_koofr_other_config.go | 3 + client/swagger/models/storage_local_config.go | 10 + .../swagger/models/storage_mailru_config.go | 18 + client/swagger/models/storage_mega_config.go | 3 + .../models/storage_netstorage_config.go | 3 + .../swagger/models/storage_onedrive_config.go | 18 +- .../models/storage_oos_env_auth_config.go | 13 +- ...rage_oos_instance_principal_auth_config.go | 13 +- .../models/storage_oos_no_auth_config.go | 13 +- ...rage_oos_resource_principal_auth_config.go | 13 +- .../storage_oos_user_principal_auth_config.go | 13 +- ...orage_oos_workload_identity_auth_config.go | 114 + .../models/storage_opendrive_config.go | 3 + .../swagger/models/storage_pcloud_config.go | 3 + .../models/storage_premiumizeme_config.go | 18 + client/swagger/models/storage_putio_config.go | 18 + .../swagger/models/storage_qingstor_config.go | 3 + .../swagger/models/storage_s3_a_w_s_config.go | 35 +- .../models/storage_s3_alibaba_config.go | 33 +- .../models/storage_s3_arvan_cloud_config.go | 35 +- .../swagger/models/storage_s3_ceph_config.go | 33 +- .../models/storage_s3_china_mobile_config.go | 33 +- .../models/storage_s3_cloudflare_config.go | 33 +- .../models/storage_s3_digital_ocean_config.go | 33 +- .../models/storage_s3_dreamhost_config.go | 33 +- .../swagger/models/storage_s3_g_c_s_config.go | 188 + .../models/storage_s3_huawei_o_b_s_config.go | 33 +- .../models/storage_s3_i_b_m_c_o_s_config.go | 33 +- .../models/storage_s3_i_drive_config.go | 33 +- .../models/storage_s3_i_o_n_o_s_config.go | 33 +- .../models/storage_s3_leviia_config.go | 184 + .../swagger/models/storage_s3_liara_config.go | 33 +- .../models/storage_s3_linode_config.go | 182 + .../models/storage_s3_lyve_cloud_config.go | 33 +- .../models/storage_s3_magalu_config.go | 186 + .../swagger/models/storage_s3_minio_config.go | 33 +- .../models/storage_s3_netease_config.go | 33 +- .../swagger/models/storage_s3_other_config.go | 33 +- .../models/storage_s3_petabox_config.go | 186 + .../swagger/models/storage_s3_qiniu_config.go | 33 +- .../models/storage_s3_rack_corp_config.go | 33 +- .../models/storage_s3_rclone_config.go | 187 + 
.../models/storage_s3_scaleway_config.go | 33 +- .../models/storage_s3_seaweed_f_s_config.go | 33 +- .../models/storage_s3_stack_path_config.go | 33 +- .../swagger/models/storage_s3_storj_config.go | 33 +- .../models/storage_s3_synology_config.go | 186 + .../models/storage_s3_tencent_c_o_s_config.go | 33 +- .../models/storage_s3_wasabi_config.go | 33 +- .../swagger/models/storage_seafile_config.go | 3 + client/swagger/models/storage_sftp_config.go | 18 + .../models/storage_sharefile_config.go | 18 + client/swagger/models/storage_sia_config.go | 3 + client/swagger/models/storage_smb_config.go | 3 + .../models/storage_storj_existing_config.go | 3 + .../models/storage_storj_new_config.go | 3 + .../models/storage_sugarsync_config.go | 3 + client/swagger/models/storage_swift_config.go | 14 +- client/swagger/models/storage_union_config.go | 3 + .../swagger/models/storage_uptobox_config.go | 6 + .../swagger/models/storage_webdav_config.go | 20 +- .../swagger/models/storage_yandex_config.go | 6 + client/swagger/models/storage_zoho_config.go | 3 + client/swagger/models/store_piece_reader.go | 2 +- docs/en/SUMMARY.md | 21 +- docs/en/cli-reference/admin/README.md | 6 +- docs/en/cli-reference/download.md | 69 +- docs/en/cli-reference/prep/README.md | 5 +- docs/en/cli-reference/prep/delete-piece.md | 19 + docs/en/cli-reference/run/download-server.md | 65 +- .../en/cli-reference/storage/create/README.md | 5 +- docs/en/cli-reference/storage/create/acd.md | 124 - .../cli-reference/storage/create/azureblob.md | 47 +- docs/en/cli-reference/storage/create/b2.md | 62 +- docs/en/cli-reference/storage/create/box.md | 21 +- docs/en/cli-reference/storage/create/drive.md | 136 +- .../cli-reference/storage/create/dropbox.md | 31 +- .../cli-reference/storage/create/fichier.md | 10 +- .../storage/create/filefabric.md | 6 +- docs/en/cli-reference/storage/create/ftp.md | 17 +- docs/en/cli-reference/storage/create/gcs.md | 34 +- .../cli-reference/storage/create/gphotos.md | 76 +- docs/en/cli-reference/storage/create/hdfs.md | 18 +- .../cli-reference/storage/create/hidrive.md | 6 +- docs/en/cli-reference/storage/create/http.md | 16 +- .../storage/create/internetarchive.md | 6 +- .../storage/create/jottacloud.md | 36 +- .../storage/create/koofr/digistorage.md | 16 +- .../storage/create/koofr/koofr.md | 16 +- .../storage/create/koofr/other.md | 12 +- docs/en/cli-reference/storage/create/local.md | 56 + .../en/cli-reference/storage/create/mailru.md | 42 +- docs/en/cli-reference/storage/create/mega.md | 16 +- .../storage/create/netstorage.md | 8 +- .../cli-reference/storage/create/onedrive.md | 95 +- .../storage/create/oos/README.md | 2 + .../storage/create/oos/env_auth.md | 43 +- .../create/oos/instance_principal_auth.md | 43 +- .../storage/create/oos/no_auth.md | 43 +- .../create/oos/resource_principal_auth.md | 43 +- .../storage/create/oos/user_principal_auth.md | 43 +- .../create/oos/workload_identity_auth.md | 250 + .../cli-reference/storage/create/opendrive.md | 10 +- .../en/cli-reference/storage/create/pcloud.md | 6 +- .../storage/create/premiumizeme.md | 40 +- docs/en/cli-reference/storage/create/putio.md | 38 +- .../cli-reference/storage/create/qingstor.md | 6 +- .../cli-reference/storage/create/s3/README.md | 11 +- .../storage/create/s3/alibaba.md | 192 +- .../storage/create/s3/arvancloud.md | 200 +- .../en/cli-reference/storage/create/s3/aws.md | 213 +- .../cli-reference/storage/create/s3/ceph.md | 200 +- .../storage/create/s3/chinamobile.md | 200 +- .../storage/create/s3/cloudflare.md | 192 +- 
.../storage/create/s3/digitalocean.md | 192 +- .../storage/create/s3/dreamhost.md | 192 +- .../en/cli-reference/storage/create/s3/gcs.md | 579 ++ .../storage/create/s3/huaweiobs.md | 192 +- .../cli-reference/storage/create/s3/ibmcos.md | 192 +- .../cli-reference/storage/create/s3/idrive.md | 192 +- .../cli-reference/storage/create/s3/ionos.md | 192 +- .../cli-reference/storage/create/s3/leviia.md | 572 ++ .../cli-reference/storage/create/s3/liara.md | 192 +- .../cli-reference/storage/create/s3/linode.md | 570 ++ .../storage/create/s3/lyvecloud.md | 192 +- .../cli-reference/storage/create/s3/magalu.md | 571 ++ .../cli-reference/storage/create/s3/minio.md | 200 +- .../storage/create/s3/netease.md | 192 +- .../cli-reference/storage/create/s3/other.md | 192 +- .../storage/create/s3/petabox.md | 580 ++ .../cli-reference/storage/create/s3/qiniu.md | 192 +- .../storage/create/s3/rackcorp.md | 192 +- .../cli-reference/storage/create/s3/rclone.md | 578 ++ .../storage/create/s3/scaleway.md | 207 +- .../storage/create/s3/seaweedfs.md | 192 +- .../storage/create/s3/stackpath.md | 192 +- .../cli-reference/storage/create/s3/storj.md | 192 +- .../storage/create/s3/synology.md | 568 ++ .../storage/create/s3/tencentcos.md | 192 +- .../cli-reference/storage/create/s3/wasabi.md | 192 +- .../cli-reference/storage/create/seafile.md | 10 +- docs/en/cli-reference/storage/create/sftp.md | 129 +- .../cli-reference/storage/create/sharefile.md | 34 +- docs/en/cli-reference/storage/create/sia.md | 10 +- docs/en/cli-reference/storage/create/smb.md | 6 +- .../storage/create/storj/existing.md | 9 +- .../cli-reference/storage/create/storj/new.md | 9 +- .../cli-reference/storage/create/sugarsync.md | 6 +- docs/en/cli-reference/storage/create/swift.md | 114 +- docs/en/cli-reference/storage/create/union.md | 6 +- .../cli-reference/storage/create/uptobox.md | 12 +- .../en/cli-reference/storage/create/webdav.md | 34 +- .../en/cli-reference/storage/create/yandex.md | 20 +- docs/en/cli-reference/storage/create/zoho.md | 14 +- .../en/cli-reference/storage/update/README.md | 5 +- docs/en/cli-reference/storage/update/acd.md | 119 - .../cli-reference/storage/update/azureblob.md | 47 +- docs/en/cli-reference/storage/update/b2.md | 62 +- docs/en/cli-reference/storage/update/box.md | 21 +- docs/en/cli-reference/storage/update/drive.md | 136 +- .../cli-reference/storage/update/dropbox.md | 31 +- .../cli-reference/storage/update/fichier.md | 10 +- .../storage/update/filefabric.md | 6 +- docs/en/cli-reference/storage/update/ftp.md | 17 +- docs/en/cli-reference/storage/update/gcs.md | 34 +- .../cli-reference/storage/update/gphotos.md | 76 +- docs/en/cli-reference/storage/update/hdfs.md | 18 +- .../cli-reference/storage/update/hidrive.md | 6 +- docs/en/cli-reference/storage/update/http.md | 16 +- .../storage/update/internetarchive.md | 6 +- .../storage/update/jottacloud.md | 36 +- .../storage/update/koofr/digistorage.md | 16 +- .../storage/update/koofr/koofr.md | 16 +- .../storage/update/koofr/other.md | 12 +- docs/en/cli-reference/storage/update/local.md | 56 + .../en/cli-reference/storage/update/mailru.md | 42 +- docs/en/cli-reference/storage/update/mega.md | 16 +- .../storage/update/netstorage.md | 8 +- .../cli-reference/storage/update/onedrive.md | 95 +- .../storage/update/oos/README.md | 2 + .../storage/update/oos/env_auth.md | 43 +- .../update/oos/instance_principal_auth.md | 43 +- .../storage/update/oos/no_auth.md | 43 +- .../update/oos/resource_principal_auth.md | 43 +- .../storage/update/oos/user_principal_auth.md | 43 +- 
.../update/oos/workload_identity_auth.md | 245 + .../cli-reference/storage/update/opendrive.md | 10 +- .../en/cli-reference/storage/update/pcloud.md | 6 +- .../storage/update/premiumizeme.md | 40 +- docs/en/cli-reference/storage/update/putio.md | 38 +- .../cli-reference/storage/update/qingstor.md | 6 +- .../cli-reference/storage/update/s3/README.md | 11 +- .../storage/update/s3/alibaba.md | 192 +- .../storage/update/s3/arvancloud.md | 200 +- .../en/cli-reference/storage/update/s3/aws.md | 213 +- .../cli-reference/storage/update/s3/ceph.md | 200 +- .../storage/update/s3/chinamobile.md | 200 +- .../storage/update/s3/cloudflare.md | 192 +- .../storage/update/s3/digitalocean.md | 192 +- .../storage/update/s3/dreamhost.md | 192 +- .../en/cli-reference/storage/update/s3/gcs.md | 574 ++ .../storage/update/s3/huaweiobs.md | 192 +- .../cli-reference/storage/update/s3/ibmcos.md | 192 +- .../cli-reference/storage/update/s3/idrive.md | 192 +- .../cli-reference/storage/update/s3/ionos.md | 192 +- .../cli-reference/storage/update/s3/leviia.md | 567 ++ .../cli-reference/storage/update/s3/liara.md | 192 +- .../cli-reference/storage/update/s3/linode.md | 565 ++ .../storage/update/s3/lyvecloud.md | 192 +- .../cli-reference/storage/update/s3/magalu.md | 566 ++ .../cli-reference/storage/update/s3/minio.md | 200 +- .../storage/update/s3/netease.md | 192 +- .../cli-reference/storage/update/s3/other.md | 192 +- .../storage/update/s3/petabox.md | 575 ++ .../cli-reference/storage/update/s3/qiniu.md | 192 +- .../storage/update/s3/rackcorp.md | 192 +- .../cli-reference/storage/update/s3/rclone.md | 573 ++ .../storage/update/s3/scaleway.md | 207 +- .../storage/update/s3/seaweedfs.md | 192 +- .../storage/update/s3/stackpath.md | 192 +- .../cli-reference/storage/update/s3/storj.md | 192 +- .../storage/update/s3/synology.md | 563 ++ .../storage/update/s3/tencentcos.md | 192 +- .../cli-reference/storage/update/s3/wasabi.md | 192 +- .../cli-reference/storage/update/seafile.md | 10 +- docs/en/cli-reference/storage/update/sftp.md | 129 +- .../cli-reference/storage/update/sharefile.md | 34 +- docs/en/cli-reference/storage/update/sia.md | 10 +- docs/en/cli-reference/storage/update/smb.md | 6 +- .../storage/update/storj/existing.md | 9 +- .../cli-reference/storage/update/storj/new.md | 9 +- .../cli-reference/storage/update/sugarsync.md | 6 +- docs/en/cli-reference/storage/update/swift.md | 114 +- docs/en/cli-reference/storage/update/union.md | 6 +- .../cli-reference/storage/update/uptobox.md | 12 +- .../en/cli-reference/storage/update/webdav.md | 34 +- .../en/cli-reference/storage/update/yandex.md | 20 +- docs/en/cli-reference/storage/update/zoho.md | 14 +- docs/en/web-api-reference/piece.md | 4 + docs/en/web-api-reference/storage.md | 36 +- docs/swagger/docs.go | 7677 +++++++++++++---- docs/swagger/swagger.json | 7676 ++++++++++++---- docs/swagger/swagger.yaml | 3820 +++++++- handler/storage/types_gen.go | 1379 ++- 437 files changed, 44823 insertions(+), 8778 deletions(-) create mode 100644 client/swagger/http/piece/delete_piece_parameters.go create mode 100644 client/swagger/http/piece/delete_piece_responses.go delete mode 100644 client/swagger/http/storage/create_acd_storage_parameters.go delete mode 100644 client/swagger/http/storage/create_acd_storage_responses.go create mode 100644 client/swagger/http/storage/create_oos_workload_identity_auth_storage_parameters.go create mode 100644 client/swagger/http/storage/create_oos_workload_identity_auth_storage_responses.go create mode 100644 
client/swagger/http/storage/create_s3_g_c_s_storage_parameters.go create mode 100644 client/swagger/http/storage/create_s3_g_c_s_storage_responses.go create mode 100644 client/swagger/http/storage/create_s3_leviia_storage_parameters.go create mode 100644 client/swagger/http/storage/create_s3_leviia_storage_responses.go create mode 100644 client/swagger/http/storage/create_s3_linode_storage_parameters.go create mode 100644 client/swagger/http/storage/create_s3_linode_storage_responses.go create mode 100644 client/swagger/http/storage/create_s3_magalu_storage_parameters.go create mode 100644 client/swagger/http/storage/create_s3_magalu_storage_responses.go create mode 100644 client/swagger/http/storage/create_s3_petabox_storage_parameters.go create mode 100644 client/swagger/http/storage/create_s3_petabox_storage_responses.go create mode 100644 client/swagger/http/storage/create_s3_rclone_storage_parameters.go create mode 100644 client/swagger/http/storage/create_s3_rclone_storage_responses.go create mode 100644 client/swagger/http/storage/create_s3_synology_storage_parameters.go create mode 100644 client/swagger/http/storage/create_s3_synology_storage_responses.go create mode 100644 client/swagger/models/dataprep_delete_piece_request.go delete mode 100644 client/swagger/models/storage_acd_config.go create mode 100644 client/swagger/models/storage_create_oos_workload_identity_auth_storage_request.go create mode 100644 client/swagger/models/storage_create_s3_g_c_s_storage_request.go create mode 100644 client/swagger/models/storage_create_s3_leviia_storage_request.go create mode 100644 client/swagger/models/storage_create_s3_linode_storage_request.go create mode 100644 client/swagger/models/storage_create_s3_magalu_storage_request.go rename client/swagger/models/{storage_create_acd_storage_request.go => storage_create_s3_petabox_storage_request.go} (63%) create mode 100644 client/swagger/models/storage_create_s3_rclone_storage_request.go create mode 100644 client/swagger/models/storage_create_s3_synology_storage_request.go create mode 100644 client/swagger/models/storage_oos_workload_identity_auth_config.go create mode 100644 client/swagger/models/storage_s3_g_c_s_config.go create mode 100644 client/swagger/models/storage_s3_leviia_config.go create mode 100644 client/swagger/models/storage_s3_linode_config.go create mode 100644 client/swagger/models/storage_s3_magalu_config.go create mode 100644 client/swagger/models/storage_s3_petabox_config.go create mode 100644 client/swagger/models/storage_s3_rclone_config.go create mode 100644 client/swagger/models/storage_s3_synology_config.go create mode 100644 docs/en/cli-reference/prep/delete-piece.md delete mode 100644 docs/en/cli-reference/storage/create/acd.md create mode 100644 docs/en/cli-reference/storage/create/oos/workload_identity_auth.md create mode 100644 docs/en/cli-reference/storage/create/s3/gcs.md create mode 100644 docs/en/cli-reference/storage/create/s3/leviia.md create mode 100644 docs/en/cli-reference/storage/create/s3/linode.md create mode 100644 docs/en/cli-reference/storage/create/s3/magalu.md create mode 100644 docs/en/cli-reference/storage/create/s3/petabox.md create mode 100644 docs/en/cli-reference/storage/create/s3/rclone.md create mode 100644 docs/en/cli-reference/storage/create/s3/synology.md delete mode 100644 docs/en/cli-reference/storage/update/acd.md create mode 100644 docs/en/cli-reference/storage/update/oos/workload_identity_auth.md create mode 100644 docs/en/cli-reference/storage/update/s3/gcs.md create mode 100644 
docs/en/cli-reference/storage/update/s3/leviia.md create mode 100644 docs/en/cli-reference/storage/update/s3/linode.md create mode 100644 docs/en/cli-reference/storage/update/s3/magalu.md create mode 100644 docs/en/cli-reference/storage/update/s3/petabox.md create mode 100644 docs/en/cli-reference/storage/update/s3/rclone.md create mode 100644 docs/en/cli-reference/storage/update/s3/synology.md diff --git a/client/swagger/http/admin/admin_client.go b/client/swagger/http/admin/admin_client.go index 56da6fc6..3a4df323 100644 --- a/client/swagger/http/admin/admin_client.go +++ b/client/swagger/http/admin/admin_client.go @@ -65,7 +65,7 @@ type ClientService interface { SetIdentity sets the user identity for tracking purpose */ func (a *Client) SetIdentity(params *SetIdentityParams, opts ...ClientOption) (*SetIdentityNoContent, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewSetIdentityParams() } @@ -84,17 +84,22 @@ func (a *Client) SetIdentity(params *SetIdentityParams, opts ...ClientOption) (* for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*SetIdentityNoContent) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for SetIdentity: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } diff --git a/client/swagger/http/admin/set_identity_responses.go b/client/swagger/http/admin/set_identity_responses.go index dc21e454..88bf43b5 100644 --- a/client/swagger/http/admin/set_identity_responses.go +++ b/client/swagger/http/admin/set_identity_responses.go @@ -7,6 +7,7 @@ package admin import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type SetIdentityReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *SetIdentityReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *SetIdentityReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 204: result := NewSetIdentityNoContent() @@ -166,7 +167,7 @@ func (o *SetIdentityBadRequest) readResponse(response runtime.ClientResponse, co o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -236,7 +237,7 @@ func (o *SetIdentityInternalServerError) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/deal/deal_client.go b/client/swagger/http/deal/deal_client.go index 3d63ead6..70f6c1e2 100644 --- a/client/swagger/http/deal/deal_client.go +++ b/client/swagger/http/deal/deal_client.go @@ -69,7 +69,7 @@ ListDeals lists all deals List all deals */ func (a *Client) ListDeals(params *ListDealsParams, opts ...ClientOption) (*ListDealsOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewListDealsParams() } @@ -88,17 +88,22 @@ func (a *Client) ListDeals(params *ListDealsParams, opts ...ClientOption) (*List for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ListDealsOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ListDeals: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -109,7 +114,7 @@ SendManual sends a manual deal proposal Send a manual deal proposal */ func (a *Client) SendManual(params *SendManualParams, opts ...ClientOption) (*SendManualOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewSendManualParams() } @@ -128,17 +133,22 @@ func (a *Client) SendManual(params *SendManualParams, opts ...ClientOption) (*Se for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*SendManualOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. 
+ // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for SendManual: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } diff --git a/client/swagger/http/deal/list_deals_responses.go b/client/swagger/http/deal/list_deals_responses.go index 92f90135..c8b5534a 100644 --- a/client/swagger/http/deal/list_deals_responses.go +++ b/client/swagger/http/deal/list_deals_responses.go @@ -7,6 +7,7 @@ package deal import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ListDealsReader struct { } // ReadResponse reads a server response into the received o. -func (o *ListDealsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ListDealsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewListDealsOK() @@ -108,7 +109,7 @@ func (o *ListDealsOK) GetPayload() []*models.ModelDeal { func (o *ListDealsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *ListDealsBadRequest) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *ListDealsInternalServerError) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/deal/send_manual_responses.go b/client/swagger/http/deal/send_manual_responses.go index 2d2b21a3..873759d9 100644 --- a/client/swagger/http/deal/send_manual_responses.go +++ b/client/swagger/http/deal/send_manual_responses.go @@ -7,6 +7,7 @@ package deal import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type SendManualReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *SendManualReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *SendManualReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewSendManualOK() @@ -110,7 +111,7 @@ func (o *SendManualOK) readResponse(response runtime.ClientResponse, consumer ru o.Payload = new(models.ModelDeal) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *SendManualBadRequest) readResponse(response runtime.ClientResponse, con o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *SendManualInternalServerError) readResponse(response runtime.ClientResp o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/deal_schedule/create_schedule_responses.go b/client/swagger/http/deal_schedule/create_schedule_responses.go index d42ff2f6..99d8a45e 100644 --- a/client/swagger/http/deal_schedule/create_schedule_responses.go +++ b/client/swagger/http/deal_schedule/create_schedule_responses.go @@ -7,6 +7,7 @@ package deal_schedule import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateScheduleReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateScheduleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateScheduleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateScheduleOK() @@ -110,7 +111,7 @@ func (o *CreateScheduleOK) readResponse(response runtime.ClientResponse, consume o.Payload = new(models.ModelSchedule) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateScheduleBadRequest) readResponse(response runtime.ClientResponse, o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateScheduleInternalServerError) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/deal_schedule/deal_schedule_client.go b/client/swagger/http/deal_schedule/deal_schedule_client.go index a59b9e3c..22eb3335 100644 --- a/client/swagger/http/deal_schedule/deal_schedule_client.go +++ b/client/swagger/http/deal_schedule/deal_schedule_client.go @@ -79,7 +79,7 @@ CreateSchedule creates a new schedule Create a new schedule */ func (a *Client) CreateSchedule(params *CreateScheduleParams, opts ...ClientOption) (*CreateScheduleOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateScheduleParams() } @@ -98,17 +98,22 @@ func (a *Client) CreateSchedule(params *CreateScheduleParams, opts ...ClientOpti for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateScheduleOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateSchedule: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -117,7 +122,7 @@ func (a *Client) CreateSchedule(params *CreateScheduleParams, opts ...ClientOpti ListPreparationSchedules lists all schedules for a preparation */ func (a *Client) ListPreparationSchedules(params *ListPreparationSchedulesParams, opts ...ClientOption) (*ListPreparationSchedulesOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewListPreparationSchedulesParams() } @@ -136,17 +141,22 @@ func (a *Client) ListPreparationSchedules(params *ListPreparationSchedulesParams for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ListPreparationSchedulesOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ListPreparationSchedules: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -155,7 +165,7 @@ func (a *Client) ListPreparationSchedules(params *ListPreparationSchedulesParams ListSchedules lists all deal making schedules */ func (a *Client) ListSchedules(params *ListSchedulesParams, opts ...ClientOption) (*ListSchedulesOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewListSchedulesParams() } @@ -174,17 +184,22 @@ func (a *Client) ListSchedules(params *ListSchedulesParams, opts ...ClientOption for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ListSchedulesOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ListSchedules: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -193,7 +208,7 @@ func (a *Client) ListSchedules(params *ListSchedulesParams, opts ...ClientOption PauseSchedule pauses a specific schedule */ func (a *Client) PauseSchedule(params *PauseScheduleParams, opts ...ClientOption) (*PauseScheduleOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewPauseScheduleParams() } @@ -212,17 +227,22 @@ func (a *Client) PauseSchedule(params *PauseScheduleParams, opts ...ClientOption for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*PauseScheduleOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for PauseSchedule: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -231,7 +251,7 @@ func (a *Client) PauseSchedule(params *PauseScheduleParams, opts ...ClientOption RemoveSchedule deletes a specific schedule */ func (a *Client) RemoveSchedule(params *RemoveScheduleParams, opts ...ClientOption) (*RemoveScheduleNoContent, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewRemoveScheduleParams() } @@ -250,17 +270,22 @@ func (a *Client) RemoveSchedule(params *RemoveScheduleParams, opts ...ClientOpti for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*RemoveScheduleNoContent) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for RemoveSchedule: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -269,7 +294,7 @@ func (a *Client) RemoveSchedule(params *RemoveScheduleParams, opts ...ClientOpti ResumeSchedule resumes a specific schedule */ func (a *Client) ResumeSchedule(params *ResumeScheduleParams, opts ...ClientOption) (*ResumeScheduleOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewResumeScheduleParams() } @@ -288,17 +313,22 @@ func (a *Client) ResumeSchedule(params *ResumeScheduleParams, opts ...ClientOpti for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ResumeScheduleOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ResumeSchedule: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -309,7 +339,7 @@ UpdateSchedule updates a schedule Update a schedule */ func (a *Client) UpdateSchedule(params *UpdateScheduleParams, opts ...ClientOption) (*UpdateScheduleOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewUpdateScheduleParams() } @@ -328,17 +358,22 @@ func (a *Client) UpdateSchedule(params *UpdateScheduleParams, opts ...ClientOpti for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*UpdateScheduleOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for UpdateSchedule: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } diff --git a/client/swagger/http/deal_schedule/list_preparation_schedules_responses.go b/client/swagger/http/deal_schedule/list_preparation_schedules_responses.go index 870dcb39..d5cd3399 100644 --- a/client/swagger/http/deal_schedule/list_preparation_schedules_responses.go +++ b/client/swagger/http/deal_schedule/list_preparation_schedules_responses.go @@ -7,6 +7,7 @@ package deal_schedule import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ListPreparationSchedulesReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *ListPreparationSchedulesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ListPreparationSchedulesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewListPreparationSchedulesOK() @@ -108,7 +109,7 @@ func (o *ListPreparationSchedulesOK) GetPayload() []*models.ModelSchedule { func (o *ListPreparationSchedulesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *ListPreparationSchedulesBadRequest) readResponse(response runtime.Clien o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *ListPreparationSchedulesInternalServerError) readResponse(response runt o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/deal_schedule/list_schedules_responses.go b/client/swagger/http/deal_schedule/list_schedules_responses.go index 7ff9d8c2..77c915c2 100644 --- a/client/swagger/http/deal_schedule/list_schedules_responses.go +++ b/client/swagger/http/deal_schedule/list_schedules_responses.go @@ -7,6 +7,7 @@ package deal_schedule import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ListSchedulesReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *ListSchedulesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ListSchedulesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewListSchedulesOK() @@ -108,7 +109,7 @@ func (o *ListSchedulesOK) GetPayload() []*models.ModelSchedule { func (o *ListSchedulesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *ListSchedulesBadRequest) readResponse(response runtime.ClientResponse, o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *ListSchedulesInternalServerError) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/deal_schedule/pause_schedule_responses.go b/client/swagger/http/deal_schedule/pause_schedule_responses.go index 4ce80fe7..64a0f976 100644 --- a/client/swagger/http/deal_schedule/pause_schedule_responses.go +++ b/client/swagger/http/deal_schedule/pause_schedule_responses.go @@ -7,6 +7,7 @@ package deal_schedule import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type PauseScheduleReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *PauseScheduleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *PauseScheduleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewPauseScheduleOK() @@ -110,7 +111,7 @@ func (o *PauseScheduleOK) readResponse(response runtime.ClientResponse, consumer o.Payload = new(models.ModelSchedule) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *PauseScheduleBadRequest) readResponse(response runtime.ClientResponse, o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *PauseScheduleInternalServerError) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/deal_schedule/remove_schedule_responses.go b/client/swagger/http/deal_schedule/remove_schedule_responses.go index 8c92e07c..b61586ec 100644 --- a/client/swagger/http/deal_schedule/remove_schedule_responses.go +++ b/client/swagger/http/deal_schedule/remove_schedule_responses.go @@ -7,6 +7,7 @@ package deal_schedule import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type RemoveScheduleReader struct { } // ReadResponse reads a server response into the received o. -func (o *RemoveScheduleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *RemoveScheduleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 204: result := NewRemoveScheduleNoContent() @@ -166,7 +167,7 @@ func (o *RemoveScheduleBadRequest) readResponse(response runtime.ClientResponse, o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -236,7 +237,7 @@ func (o *RemoveScheduleInternalServerError) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/deal_schedule/resume_schedule_responses.go b/client/swagger/http/deal_schedule/resume_schedule_responses.go index e04d6496..254df485 100644 --- a/client/swagger/http/deal_schedule/resume_schedule_responses.go +++ b/client/swagger/http/deal_schedule/resume_schedule_responses.go @@ -7,6 +7,7 @@ package deal_schedule import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ResumeScheduleReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *ResumeScheduleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ResumeScheduleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewResumeScheduleOK() @@ -110,7 +111,7 @@ func (o *ResumeScheduleOK) readResponse(response runtime.ClientResponse, consume o.Payload = new(models.ModelSchedule) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *ResumeScheduleBadRequest) readResponse(response runtime.ClientResponse, o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *ResumeScheduleInternalServerError) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/deal_schedule/update_schedule_responses.go b/client/swagger/http/deal_schedule/update_schedule_responses.go index 6a085515..e38d95e7 100644 --- a/client/swagger/http/deal_schedule/update_schedule_responses.go +++ b/client/swagger/http/deal_schedule/update_schedule_responses.go @@ -7,6 +7,7 @@ package deal_schedule import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type UpdateScheduleReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *UpdateScheduleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *UpdateScheduleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewUpdateScheduleOK() @@ -110,7 +111,7 @@ func (o *UpdateScheduleOK) readResponse(response runtime.ClientResponse, consume o.Payload = new(models.ModelSchedule) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *UpdateScheduleBadRequest) readResponse(response runtime.ClientResponse, o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *UpdateScheduleInternalServerError) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/file/file_client.go b/client/swagger/http/file/file_client.go index 3540b600..e4471877 100644 --- a/client/swagger/http/file/file_client.go +++ b/client/swagger/http/file/file_client.go @@ -98,7 +98,7 @@ type ClientService interface { GetFile gets details about a file */ func (a *Client) GetFile(params *GetFileParams, opts ...ClientOption) (*GetFileOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewGetFileParams() } @@ -117,17 +117,22 @@ func (a *Client) GetFile(params *GetFileParams, opts ...ClientOption) (*GetFileO for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*GetFileOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for GetFile: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -136,7 +141,7 @@ func (a *Client) GetFile(params *GetFileParams, opts ...ClientOption) (*GetFileO GetFileDeals gets all deals that have been made for a file */ func (a *Client) GetFileDeals(params *GetFileDealsParams, opts ...ClientOption) (*GetFileDealsOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewGetFileDealsParams() } @@ -155,17 +160,22 @@ func (a *Client) GetFileDeals(params *GetFileDealsParams, opts ...ClientOption) for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*GetFileDealsOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for GetFileDeals: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -174,7 +184,7 @@ func (a *Client) GetFileDeals(params *GetFileDealsParams, opts ...ClientOption) PrepareToPackFile prepares job for a given item */ func (a *Client) PrepareToPackFile(params *PrepareToPackFileParams, opts ...ClientOption) (*PrepareToPackFileOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewPrepareToPackFileParams() } @@ -193,17 +203,22 @@ func (a *Client) PrepareToPackFile(params *PrepareToPackFileParams, opts ...Clie for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*PrepareToPackFileOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for PrepareToPackFile: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -214,7 +229,7 @@ PushFile pushes a file to be queued Tells Singularity that something is ready to be grabbed for data preparation */ func (a *Client) PushFile(params *PushFileParams, opts ...ClientOption) (*PushFileOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewPushFileParams() } @@ -233,17 +248,22 @@ func (a *Client) PushFile(params *PushFileParams, opts ...ClientOption) (*PushFi for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*PushFileOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for PushFile: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -252,7 +272,7 @@ func (a *Client) PushFile(params *PushFileParams, opts ...ClientOption) (*PushFi RetrieveFile gets content of a file */ func (a *Client) RetrieveFile(params *RetrieveFileParams, writer io.Writer, opts ...ClientOption) (*RetrieveFileOK, *RetrieveFilePartialContent, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewRetrieveFileParams() } @@ -271,18 +291,22 @@ func (a *Client) RetrieveFile(params *RetrieveFileParams, writer io.Writer, opts for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, nil, err } + + // several success responses have to be checked switch value := result.(type) { case *RetrieveFileOK: return value, nil, nil case *RetrieveFilePartialContent: return nil, value, nil } - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for file: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } diff --git a/client/swagger/http/file/get_file_deals_responses.go b/client/swagger/http/file/get_file_deals_responses.go index 230db68d..b849cb10 100644 --- a/client/swagger/http/file/get_file_deals_responses.go +++ b/client/swagger/http/file/get_file_deals_responses.go @@ -7,6 +7,7 @@ package file import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type GetFileDealsReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *GetFileDealsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *GetFileDealsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewGetFileDealsOK() @@ -102,7 +103,7 @@ func (o *GetFileDealsOK) GetPayload() []*models.FileDealsForFileRange { func (o *GetFileDealsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -172,7 +173,7 @@ func (o *GetFileDealsInternalServerError) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/file/get_file_responses.go b/client/swagger/http/file/get_file_responses.go index a0b07de0..9a511af4 100644 --- a/client/swagger/http/file/get_file_responses.go +++ b/client/swagger/http/file/get_file_responses.go @@ -7,6 +7,7 @@ package file import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type GetFileReader struct { } // ReadResponse reads a server response into the received o. -func (o *GetFileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *GetFileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewGetFileOK() @@ -104,7 +105,7 @@ func (o *GetFileOK) readResponse(response runtime.ClientResponse, consumer runti o.Payload = new(models.ModelFile) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -174,7 +175,7 @@ func (o *GetFileInternalServerError) readResponse(response runtime.ClientRespons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/file/prepare_to_pack_file_responses.go b/client/swagger/http/file/prepare_to_pack_file_responses.go index 91e3f7ec..d56caa99 100644 --- a/client/swagger/http/file/prepare_to_pack_file_responses.go +++ b/client/swagger/http/file/prepare_to_pack_file_responses.go @@ -7,6 +7,7 @@ package file import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -20,7 +21,7 @@ type PrepareToPackFileReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *PrepareToPackFileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *PrepareToPackFileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewPrepareToPackFileOK() @@ -106,7 +107,7 @@ func (o *PrepareToPackFileOK) GetPayload() int64 { func (o *PrepareToPackFileOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -174,7 +175,7 @@ func (o *PrepareToPackFileBadRequest) GetPayload() string { func (o *PrepareToPackFileBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -242,7 +243,7 @@ func (o *PrepareToPackFileInternalServerError) GetPayload() string { func (o *PrepareToPackFileInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/file/push_file_responses.go b/client/swagger/http/file/push_file_responses.go index 9626893a..9171756c 100644 --- a/client/swagger/http/file/push_file_responses.go +++ b/client/swagger/http/file/push_file_responses.go @@ -7,6 +7,7 @@ package file import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type PushFileReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *PushFileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *PushFileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewPushFileOK() @@ -110,7 +111,7 @@ func (o *PushFileOK) readResponse(response runtime.ClientResponse, consumer runt o.Payload = new(models.ModelFile) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *PushFileBadRequest) readResponse(response runtime.ClientResponse, consu o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *PushFileInternalServerError) readResponse(response runtime.ClientRespon o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/file/retrieve_file_responses.go b/client/swagger/http/file/retrieve_file_responses.go index 12ca9cae..17ff2cd1 100644 --- a/client/swagger/http/file/retrieve_file_responses.go +++ b/client/swagger/http/file/retrieve_file_responses.go @@ -7,6 +7,7 @@ package file import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -23,7 +24,7 @@ type RetrieveFileReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *RetrieveFileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *RetrieveFileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewRetrieveFileOK(o.writer) @@ -122,7 +123,7 @@ func (o *RetrieveFileOK) GetPayload() io.Writer { func (o *RetrieveFileOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -191,7 +192,7 @@ func (o *RetrieveFilePartialContent) GetPayload() io.Writer { func (o *RetrieveFilePartialContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -261,7 +262,7 @@ func (o *RetrieveFileBadRequest) readResponse(response runtime.ClientResponse, c o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -331,7 +332,7 @@ func (o *RetrieveFileNotFound) readResponse(response runtime.ClientResponse, con o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -401,7 +402,7 @@ func (o *RetrieveFileInternalServerError) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/job/job_client.go b/client/swagger/http/job/job_client.go index 2c91f8dc..474864ac 100644 --- a/client/swagger/http/job/job_client.go +++ b/client/swagger/http/job/job_client.go @@ -79,7 +79,7 @@ type ClientService interface { Pack packs a pack job into car files */ func (a *Client) Pack(params *PackParams, opts ...ClientOption) (*PackOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewPackParams() } @@ -98,17 +98,22 @@ func (a *Client) Pack(params *PackParams, opts ...ClientOption) (*PackOK, error) for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*PackOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. 
+ // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for Pack: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -117,7 +122,7 @@ func (a *Client) Pack(params *PackParams, opts ...ClientOption) (*PackOK, error) PauseDagGen pauses an ongoing d a g generation job */ func (a *Client) PauseDagGen(params *PauseDagGenParams, opts ...ClientOption) (*PauseDagGenOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewPauseDagGenParams() } @@ -136,17 +141,22 @@ func (a *Client) PauseDagGen(params *PauseDagGenParams, opts ...ClientOption) (* for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*PauseDagGenOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for PauseDagGen: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -155,7 +165,7 @@ func (a *Client) PauseDagGen(params *PauseDagGenParams, opts ...ClientOption) (* PausePack pauses a specific packing job */ func (a *Client) PausePack(params *PausePackParams, opts ...ClientOption) (*PausePackOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewPausePackParams() } @@ -174,17 +184,22 @@ func (a *Client) PausePack(params *PausePackParams, opts ...ClientOption) (*Paus for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*PausePackOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for PausePack: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -193,7 +208,7 @@ func (a *Client) PausePack(params *PausePackParams, opts ...ClientOption) (*Paus PauseScan pauses an ongoing scanning job */ func (a *Client) PauseScan(params *PauseScanParams, opts ...ClientOption) (*PauseScanOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewPauseScanParams() } @@ -212,17 +227,22 @@ func (a *Client) PauseScan(params *PauseScanParams, opts ...ClientOption) (*Paus for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*PauseScanOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for PauseScan: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -231,7 +251,7 @@ func (a *Client) PauseScan(params *PauseScanParams, opts ...ClientOption) (*Paus PrepareToPackSource prepares to pack a data source */ func (a *Client) PrepareToPackSource(params *PrepareToPackSourceParams, opts ...ClientOption) (*PrepareToPackSourceNoContent, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewPrepareToPackSourceParams() } @@ -250,17 +270,22 @@ func (a *Client) PrepareToPackSource(params *PrepareToPackSourceParams, opts ... for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*PrepareToPackSourceNoContent) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for PrepareToPackSource: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -269,7 +294,7 @@ func (a *Client) PrepareToPackSource(params *PrepareToPackSourceParams, opts ... 
StartDagGen starts a new d a g generation job */ func (a *Client) StartDagGen(params *StartDagGenParams, opts ...ClientOption) (*StartDagGenOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewStartDagGenParams() } @@ -288,17 +313,22 @@ func (a *Client) StartDagGen(params *StartDagGenParams, opts ...ClientOption) (* for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*StartDagGenOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for StartDagGen: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -307,7 +337,7 @@ func (a *Client) StartDagGen(params *StartDagGenParams, opts ...ClientOption) (* StartPack starts or restart a specific packing job */ func (a *Client) StartPack(params *StartPackParams, opts ...ClientOption) (*StartPackOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewStartPackParams() } @@ -326,17 +356,22 @@ func (a *Client) StartPack(params *StartPackParams, opts ...ClientOption) (*Star for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*StartPackOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for StartPack: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -345,7 +380,7 @@ func (a *Client) StartPack(params *StartPackParams, opts ...ClientOption) (*Star StartScan starts a new scanning job */ func (a *Client) StartScan(params *StartScanParams, opts ...ClientOption) (*StartScanOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewStartScanParams() } @@ -364,17 +399,22 @@ func (a *Client) StartScan(params *StartScanParams, opts ...ClientOption) (*Star for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*StartScanOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. 
+ // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for StartScan: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } diff --git a/client/swagger/http/job/pack_responses.go b/client/swagger/http/job/pack_responses.go index 73848734..2f3c694c 100644 --- a/client/swagger/http/job/pack_responses.go +++ b/client/swagger/http/job/pack_responses.go @@ -7,6 +7,7 @@ package job import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type PackReader struct { } // ReadResponse reads a server response into the received o. -func (o *PackReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *PackReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewPackOK() @@ -110,7 +111,7 @@ func (o *PackOK) readResponse(response runtime.ClientResponse, consumer runtime. o.Payload = new(models.ModelCar) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *PackBadRequest) GetPayload() string { func (o *PackBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -246,7 +247,7 @@ func (o *PackInternalServerError) GetPayload() string { func (o *PackInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/job/pause_dag_gen_responses.go b/client/swagger/http/job/pause_dag_gen_responses.go index f3904cce..25819225 100644 --- a/client/swagger/http/job/pause_dag_gen_responses.go +++ b/client/swagger/http/job/pause_dag_gen_responses.go @@ -7,6 +7,7 @@ package job import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type PauseDagGenReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *PauseDagGenReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *PauseDagGenReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewPauseDagGenOK() @@ -110,7 +111,7 @@ func (o *PauseDagGenOK) readResponse(response runtime.ClientResponse, consumer r o.Payload = new(models.ModelJob) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *PauseDagGenBadRequest) readResponse(response runtime.ClientResponse, co o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *PauseDagGenInternalServerError) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/job/pause_pack_responses.go b/client/swagger/http/job/pause_pack_responses.go index 5c58369e..32bbf903 100644 --- a/client/swagger/http/job/pause_pack_responses.go +++ b/client/swagger/http/job/pause_pack_responses.go @@ -7,6 +7,7 @@ package job import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type PausePackReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *PausePackReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *PausePackReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewPausePackOK() @@ -108,7 +109,7 @@ func (o *PausePackOK) GetPayload() []*models.ModelJob { func (o *PausePackOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *PausePackBadRequest) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *PausePackInternalServerError) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/job/pause_scan_responses.go b/client/swagger/http/job/pause_scan_responses.go index 674116cf..af6f0bd0 100644 --- a/client/swagger/http/job/pause_scan_responses.go +++ b/client/swagger/http/job/pause_scan_responses.go @@ -7,6 +7,7 @@ package job import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type PauseScanReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *PauseScanReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *PauseScanReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewPauseScanOK() @@ -110,7 +111,7 @@ func (o *PauseScanOK) readResponse(response runtime.ClientResponse, consumer run o.Payload = new(models.ModelJob) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *PauseScanBadRequest) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *PauseScanInternalServerError) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/job/prepare_to_pack_source_responses.go b/client/swagger/http/job/prepare_to_pack_source_responses.go index ffdce4ea..11539600 100644 --- a/client/swagger/http/job/prepare_to_pack_source_responses.go +++ b/client/swagger/http/job/prepare_to_pack_source_responses.go @@ -7,6 +7,7 @@ package job import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -20,7 +21,7 @@ type PrepareToPackSourceReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *PrepareToPackSourceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *PrepareToPackSourceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 204: result := NewPrepareToPackSourceNoContent() @@ -162,7 +163,7 @@ func (o *PrepareToPackSourceBadRequest) GetPayload() string { func (o *PrepareToPackSourceBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -230,7 +231,7 @@ func (o *PrepareToPackSourceInternalServerError) GetPayload() string { func (o *PrepareToPackSourceInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/job/start_dag_gen_responses.go b/client/swagger/http/job/start_dag_gen_responses.go index 5d2f63fd..5e200895 100644 --- a/client/swagger/http/job/start_dag_gen_responses.go +++ b/client/swagger/http/job/start_dag_gen_responses.go @@ -7,6 +7,7 @@ package job import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type StartDagGenReader struct { } // ReadResponse reads a server response into the received o. -func (o *StartDagGenReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *StartDagGenReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewStartDagGenOK() @@ -110,7 +111,7 @@ func (o *StartDagGenOK) readResponse(response runtime.ClientResponse, consumer r o.Payload = new(models.ModelJob) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *StartDagGenBadRequest) readResponse(response runtime.ClientResponse, co o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *StartDagGenInternalServerError) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/job/start_pack_responses.go b/client/swagger/http/job/start_pack_responses.go index fcb140fc..528b14d3 100644 --- a/client/swagger/http/job/start_pack_responses.go +++ b/client/swagger/http/job/start_pack_responses.go @@ -7,6 +7,7 @@ package job import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type StartPackReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *StartPackReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *StartPackReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewStartPackOK() @@ -108,7 +109,7 @@ func (o *StartPackOK) GetPayload() []*models.ModelJob { func (o *StartPackOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *StartPackBadRequest) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *StartPackInternalServerError) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/job/start_scan_responses.go b/client/swagger/http/job/start_scan_responses.go index 6278f9a6..73bd4195 100644 --- a/client/swagger/http/job/start_scan_responses.go +++ b/client/swagger/http/job/start_scan_responses.go @@ -7,6 +7,7 @@ package job import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type StartScanReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *StartScanReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *StartScanReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewStartScanOK() @@ -110,7 +111,7 @@ func (o *StartScanOK) readResponse(response runtime.ClientResponse, consumer run o.Payload = new(models.ModelJob) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *StartScanBadRequest) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *StartScanInternalServerError) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/piece/add_piece_responses.go b/client/swagger/http/piece/add_piece_responses.go index 469ca325..d3eac589 100644 --- a/client/swagger/http/piece/add_piece_responses.go +++ b/client/swagger/http/piece/add_piece_responses.go @@ -7,6 +7,7 @@ package piece import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type AddPieceReader struct { } // ReadResponse reads a server response into the received o. -func (o *AddPieceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *AddPieceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewAddPieceOK() @@ -110,7 +111,7 @@ func (o *AddPieceOK) readResponse(response runtime.ClientResponse, consumer runt o.Payload = new(models.ModelCar) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *AddPieceBadRequest) readResponse(response runtime.ClientResponse, consu o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *AddPieceInternalServerError) readResponse(response runtime.ClientRespon o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/piece/delete_piece_parameters.go b/client/swagger/http/piece/delete_piece_parameters.go new file mode 100644 index 00000000..2731e504 --- /dev/null +++ b/client/swagger/http/piece/delete_piece_parameters.go @@ -0,0 +1,197 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package piece + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewDeletePieceParams creates a new DeletePieceParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewDeletePieceParams() *DeletePieceParams { + return &DeletePieceParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewDeletePieceParamsWithTimeout creates a new DeletePieceParams object +// with the ability to set a timeout on a request. +func NewDeletePieceParamsWithTimeout(timeout time.Duration) *DeletePieceParams { + return &DeletePieceParams{ + timeout: timeout, + } +} + +// NewDeletePieceParamsWithContext creates a new DeletePieceParams object +// with the ability to set a context for a request. +func NewDeletePieceParamsWithContext(ctx context.Context) *DeletePieceParams { + return &DeletePieceParams{ + Context: ctx, + } +} + +// NewDeletePieceParamsWithHTTPClient creates a new DeletePieceParams object +// with the ability to set a custom HTTPClient for a request. +func NewDeletePieceParamsWithHTTPClient(client *http.Client) *DeletePieceParams { + return &DeletePieceParams{ + HTTPClient: client, + } +} + +/* +DeletePieceParams contains all the parameters to send to the API endpoint + + for the delete piece operation. + + Typically these are written to a http.Request. +*/ +type DeletePieceParams struct { + + /* ID. + + Preparation ID or name + */ + ID string + + /* PieceCid. + + Piece CID + */ + PieceCid string + + /* Request. + + Delete options + */ + Request *models.DataprepDeletePieceRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the delete piece params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DeletePieceParams) WithDefaults() *DeletePieceParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the delete piece params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *DeletePieceParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the delete piece params +func (o *DeletePieceParams) WithTimeout(timeout time.Duration) *DeletePieceParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the delete piece params +func (o *DeletePieceParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the delete piece params +func (o *DeletePieceParams) WithContext(ctx context.Context) *DeletePieceParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the delete piece params +func (o *DeletePieceParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the delete piece params +func (o *DeletePieceParams) WithHTTPClient(client *http.Client) *DeletePieceParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the delete piece params +func (o *DeletePieceParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the delete piece params +func (o *DeletePieceParams) WithID(id string) *DeletePieceParams { + o.SetID(id) + return o +} + +// SetID adds the id to the delete piece params +func (o *DeletePieceParams) SetID(id string) { + o.ID = id +} + +// WithPieceCid adds the pieceCid to the delete piece params +func (o *DeletePieceParams) WithPieceCid(pieceCid string) *DeletePieceParams { + o.SetPieceCid(pieceCid) + return o +} + +// SetPieceCid adds the pieceCid to the delete piece params +func (o *DeletePieceParams) SetPieceCid(pieceCid string) { + o.PieceCid = pieceCid +} + +// WithRequest adds the request to the delete piece params +func (o *DeletePieceParams) WithRequest(request *models.DataprepDeletePieceRequest) *DeletePieceParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the delete piece params +func (o *DeletePieceParams) SetRequest(request *models.DataprepDeletePieceRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *DeletePieceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + // path param piece_cid + if err := r.SetPathParam("piece_cid", o.PieceCid); err != nil { + return err + } + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/piece/delete_piece_responses.go b/client/swagger/http/piece/delete_piece_responses.go new file mode 100644 index 00000000..e824480e --- /dev/null +++ b/client/swagger/http/piece/delete_piece_responses.go @@ -0,0 +1,321 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package piece + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// DeletePieceReader is a Reader for the DeletePiece structure. 
+type DeletePieceReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *DeletePieceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 204: + result := NewDeletePieceNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewDeletePieceBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewDeletePieceNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewDeletePieceInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[DELETE /preparation/{id}/piece/{piece_cid}] DeletePiece", response, response.Code()) + } +} + +// NewDeletePieceNoContent creates a DeletePieceNoContent with default headers values +func NewDeletePieceNoContent() *DeletePieceNoContent { + return &DeletePieceNoContent{} +} + +/* +DeletePieceNoContent describes a response with status code 204, with default header values. + +No Content +*/ +type DeletePieceNoContent struct { +} + +// IsSuccess returns true when this delete piece no content response has a 2xx status code +func (o *DeletePieceNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this delete piece no content response has a 3xx status code +func (o *DeletePieceNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete piece no content response has a 4xx status code +func (o *DeletePieceNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete piece no content response has a 5xx status code +func (o *DeletePieceNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this delete piece no content response a status code equal to that given +func (o *DeletePieceNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the delete piece no content response +func (o *DeletePieceNoContent) Code() int { + return 204 +} + +func (o *DeletePieceNoContent) Error() string { + return fmt.Sprintf("[DELETE /preparation/{id}/piece/{piece_cid}][%d] deletePieceNoContent", 204) +} + +func (o *DeletePieceNoContent) String() string { + return fmt.Sprintf("[DELETE /preparation/{id}/piece/{piece_cid}][%d] deletePieceNoContent", 204) +} + +func (o *DeletePieceNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeletePieceBadRequest creates a DeletePieceBadRequest with default headers values +func NewDeletePieceBadRequest() *DeletePieceBadRequest { + return &DeletePieceBadRequest{} +} + +/* +DeletePieceBadRequest describes a response with status code 400, with default header values. 
+ +Bad Request +*/ +type DeletePieceBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this delete piece bad request response has a 2xx status code +func (o *DeletePieceBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete piece bad request response has a 3xx status code +func (o *DeletePieceBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete piece bad request response has a 4xx status code +func (o *DeletePieceBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete piece bad request response has a 5xx status code +func (o *DeletePieceBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this delete piece bad request response a status code equal to that given +func (o *DeletePieceBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the delete piece bad request response +func (o *DeletePieceBadRequest) Code() int { + return 400 +} + +func (o *DeletePieceBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /preparation/{id}/piece/{piece_cid}][%d] deletePieceBadRequest %s", 400, payload) +} + +func (o *DeletePieceBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /preparation/{id}/piece/{piece_cid}][%d] deletePieceBadRequest %s", 400, payload) +} + +func (o *DeletePieceBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *DeletePieceBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewDeletePieceNotFound creates a DeletePieceNotFound with default headers values +func NewDeletePieceNotFound() *DeletePieceNotFound { + return &DeletePieceNotFound{} +} + +/* +DeletePieceNotFound describes a response with status code 404, with default header values. 
+ +Not Found +*/ +type DeletePieceNotFound struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this delete piece not found response has a 2xx status code +func (o *DeletePieceNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete piece not found response has a 3xx status code +func (o *DeletePieceNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete piece not found response has a 4xx status code +func (o *DeletePieceNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete piece not found response has a 5xx status code +func (o *DeletePieceNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this delete piece not found response a status code equal to that given +func (o *DeletePieceNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the delete piece not found response +func (o *DeletePieceNotFound) Code() int { + return 404 +} + +func (o *DeletePieceNotFound) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /preparation/{id}/piece/{piece_cid}][%d] deletePieceNotFound %s", 404, payload) +} + +func (o *DeletePieceNotFound) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /preparation/{id}/piece/{piece_cid}][%d] deletePieceNotFound %s", 404, payload) +} + +func (o *DeletePieceNotFound) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *DeletePieceNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewDeletePieceInternalServerError creates a DeletePieceInternalServerError with default headers values +func NewDeletePieceInternalServerError() *DeletePieceInternalServerError { + return &DeletePieceInternalServerError{} +} + +/* +DeletePieceInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type DeletePieceInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this delete piece internal server error response has a 2xx status code +func (o *DeletePieceInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete piece internal server error response has a 3xx status code +func (o *DeletePieceInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete piece internal server error response has a 4xx status code +func (o *DeletePieceInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete piece internal server error response has a 5xx status code +func (o *DeletePieceInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this delete piece internal server error response a status code equal to that given +func (o *DeletePieceInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the delete piece internal server error response +func (o *DeletePieceInternalServerError) Code() int { + return 500 +} + +func (o *DeletePieceInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /preparation/{id}/piece/{piece_cid}][%d] deletePieceInternalServerError %s", 500, payload) +} + +func (o *DeletePieceInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /preparation/{id}/piece/{piece_cid}][%d] deletePieceInternalServerError %s", 500, payload) +} + +func (o *DeletePieceInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *DeletePieceInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/piece/get_piece_id_metadata_responses.go b/client/swagger/http/piece/get_piece_id_metadata_responses.go index 7290581b..48c060d3 100644 --- a/client/swagger/http/piece/get_piece_id_metadata_responses.go +++ b/client/swagger/http/piece/get_piece_id_metadata_responses.go @@ -7,6 +7,7 @@ package piece import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type GetPieceIDMetadataReader struct { } // ReadResponse reads a server response into the received o. 
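Every non-2xx branch in the DeletePiece reader above is returned to the caller as a typed error value that wraps the decoded *models.APIHTTPError payload. A minimal sketch of how calling code might distinguish those cases with errors.As; the mapping to human-readable descriptions is illustrative and not part of the generated client:

package piececlient

import (
	"errors"
	"fmt"

	"github.com/data-preservation-programs/singularity/client/swagger/http/piece"
)

// classifyDeletePieceError maps the typed errors produced by DeletePieceReader
// onto caller-friendly descriptions. Each generated type already embeds the
// JSON-encoded API error payload in its Error()/String() output.
func classifyDeletePieceError(err error) string {
	var notFound *piece.DeletePieceNotFound
	var badRequest *piece.DeletePieceBadRequest
	var internal *piece.DeletePieceInternalServerError

	switch {
	case errors.As(err, &notFound):
		return fmt.Sprintf("preparation or piece does not exist: %v", notFound)
	case errors.As(err, &badRequest):
		return fmt.Sprintf("request was rejected as invalid: %v", badRequest)
	case errors.As(err, &internal):
		return fmt.Sprintf("server-side failure: %v", internal)
	default:
		return fmt.Sprintf("transport or unexpected error: %v", err)
	}
}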
-func (o *GetPieceIDMetadataReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *GetPieceIDMetadataReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewGetPieceIDMetadataOK() @@ -114,7 +115,7 @@ func (o *GetPieceIDMetadataOK) GetPayload() models.StorePieceReader { func (o *GetPieceIDMetadataOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -182,7 +183,7 @@ func (o *GetPieceIDMetadataBadRequest) GetPayload() string { func (o *GetPieceIDMetadataBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *GetPieceIDMetadataNotFound) GetPayload() string { func (o *GetPieceIDMetadataNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -318,7 +319,7 @@ func (o *GetPieceIDMetadataInternalServerError) GetPayload() string { func (o *GetPieceIDMetadataInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/piece/list_pieces_responses.go b/client/swagger/http/piece/list_pieces_responses.go index 67114f49..f301bc14 100644 --- a/client/swagger/http/piece/list_pieces_responses.go +++ b/client/swagger/http/piece/list_pieces_responses.go @@ -7,6 +7,7 @@ package piece import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ListPiecesReader struct { } // ReadResponse reads a server response into the received o. 
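The hunks above swap the direct err == io.EOF comparison for stderrors.Is(err, io.EOF), so an EOF that a consumer or middleware has wrapped is still treated as an empty response body instead of being reported as a failure. A self-contained illustration of the difference, not taken from the generated code:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// A consumer may wrap the underlying EOF before returning it.
	wrapped := fmt.Errorf("decode response body: %w", io.EOF)

	fmt.Println(wrapped == io.EOF)          // false: direct comparison misses wrapped errors
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: errors.Is walks the wrap chain
}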
-func (o *ListPiecesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ListPiecesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewListPiecesOK() @@ -108,7 +109,7 @@ func (o *ListPiecesOK) GetPayload() []*models.DataprepPieceList { func (o *ListPiecesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *ListPiecesBadRequest) readResponse(response runtime.ClientResponse, con o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *ListPiecesInternalServerError) readResponse(response runtime.ClientResp o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/piece/piece_client.go b/client/swagger/http/piece/piece_client.go index 1569b11b..e4f49f90 100644 --- a/client/swagger/http/piece/piece_client.go +++ b/client/swagger/http/piece/piece_client.go @@ -58,6 +58,8 @@ type ClientOption func(*runtime.ClientOperation) type ClientService interface { AddPiece(params *AddPieceParams, opts ...ClientOption) (*AddPieceOK, error) + DeletePiece(params *DeletePieceParams, opts ...ClientOption) (*DeletePieceNoContent, error) + GetPieceIDMetadata(params *GetPieceIDMetadataParams, opts ...ClientOption) (*GetPieceIDMetadataOK, error) ListPieces(params *ListPiecesParams, opts ...ClientOption) (*ListPiecesOK, error) @@ -69,7 +71,7 @@ type ClientService interface { AddPiece adds a piece to a preparation */ func (a *Client) AddPiece(params *AddPieceParams, opts ...ClientOption) (*AddPieceOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewAddPieceParams() } @@ -88,28 +90,80 @@ func (a *Client) AddPiece(params *AddPieceParams, opts ...ClientOption) (*AddPie for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*AddPieceOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for AddPiece: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } +/* + DeletePiece deletes a piece from a preparation + + Deletes a piece (CAR) and its associated records. For data pieces, resets file ranges + +to allow re-packing. 
For DAG pieces, resets directory export flags for re-generation. +*/ +func (a *Client) DeletePiece(params *DeletePieceParams, opts ...ClientOption) (*DeletePieceNoContent, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewDeletePieceParams() + } + op := &runtime.ClientOperation{ + ID: "DeletePiece", + Method: "DELETE", + PathPattern: "/preparation/{id}/piece/{piece_cid}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &DeletePieceReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*DeletePieceNoContent) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for DeletePiece: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* GetPieceIDMetadata gets metadata for a piece Get metadata for a piece for how it may be reassembled from the data source */ func (a *Client) GetPieceIDMetadata(params *GetPieceIDMetadataParams, opts ...ClientOption) (*GetPieceIDMetadataOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewGetPieceIDMetadataParams() } @@ -128,17 +182,22 @@ func (a *Client) GetPieceIDMetadata(params *GetPieceIDMetadataParams, opts ...Cl for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*GetPieceIDMetadataOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for GetPieceIDMetadata: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -147,7 +206,7 @@ func (a *Client) GetPieceIDMetadata(params *GetPieceIDMetadataParams, opts ...Cl ListPieces lists all prepared pieces for a preparation */ func (a *Client) ListPieces(params *ListPiecesParams, opts ...ClientOption) (*ListPiecesOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewListPiecesParams() } @@ -166,17 +225,22 @@ func (a *Client) ListPieces(params *ListPiecesParams, opts ...ClientOption) (*Li for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ListPiecesOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ListPieces: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } diff --git a/client/swagger/http/preparation/add_output_storage_responses.go b/client/swagger/http/preparation/add_output_storage_responses.go index bd5e49d2..154e5dd9 100644 --- a/client/swagger/http/preparation/add_output_storage_responses.go +++ b/client/swagger/http/preparation/add_output_storage_responses.go @@ -7,6 +7,7 @@ package preparation import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type AddOutputStorageReader struct { } // ReadResponse reads a server response into the received o. 
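For reference, invoking the new DeletePiece operation through the generated piece client could look roughly like the sketch below. The transport values match the defaults elsewhere in this patch (base path "/api", scheme "http"); the WithID and WithPieceCid setter names for the two path parameters are assumptions and should be checked against the generated delete_piece_parameters.go:

package main

import (
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/data-preservation-programs/singularity/client/swagger/http/piece"
)

func main() {
	// Point the runtime transport at a locally running Singularity API.
	transport := httptransport.New("localhost:9090", "/api", []string{"http"})
	pieceClient := piece.New(transport, strfmt.Default)

	// WithID / WithPieceCid are assumed setter names for the {id} and
	// {piece_cid} path parameters; see delete_piece_parameters.go.
	params := piece.NewDeletePieceParams().
		WithID("1").
		WithPieceCid("baga6ea4seaq...")

	// Success is a 204 No Content, so there is no payload to inspect.
	if _, err := pieceClient.DeletePiece(params); err != nil {
		log.Fatalf("delete piece failed: %v", err)
	}
	log.Println("piece deleted; source ranges reset for re-packing")
}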
-func (o *AddOutputStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *AddOutputStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewAddOutputStorageOK() @@ -110,7 +111,7 @@ func (o *AddOutputStorageOK) readResponse(response runtime.ClientResponse, consu o.Payload = new(models.ModelPreparation) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *AddOutputStorageBadRequest) readResponse(response runtime.ClientRespons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *AddOutputStorageInternalServerError) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/preparation/add_source_storage_responses.go b/client/swagger/http/preparation/add_source_storage_responses.go index 95267ff9..f1fb043b 100644 --- a/client/swagger/http/preparation/add_source_storage_responses.go +++ b/client/swagger/http/preparation/add_source_storage_responses.go @@ -7,6 +7,7 @@ package preparation import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type AddSourceStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *AddSourceStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *AddSourceStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewAddSourceStorageOK() @@ -110,7 +111,7 @@ func (o *AddSourceStorageOK) readResponse(response runtime.ClientResponse, consu o.Payload = new(models.ModelPreparation) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *AddSourceStorageBadRequest) readResponse(response runtime.ClientRespons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *AddSourceStorageInternalServerError) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/preparation/create_preparation_responses.go b/client/swagger/http/preparation/create_preparation_responses.go index 07550f03..83355b9c 100644 --- a/client/swagger/http/preparation/create_preparation_responses.go +++ b/client/swagger/http/preparation/create_preparation_responses.go @@ -7,6 +7,7 @@ package preparation import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreatePreparationReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreatePreparationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreatePreparationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreatePreparationOK() @@ -110,7 +111,7 @@ func (o *CreatePreparationOK) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.ModelPreparation) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreatePreparationBadRequest) readResponse(response runtime.ClientRespon o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreatePreparationInternalServerError) readResponse(response runtime.Cli o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/preparation/explore_preparation_responses.go b/client/swagger/http/preparation/explore_preparation_responses.go index c42cf36c..3bbef6a9 100644 --- a/client/swagger/http/preparation/explore_preparation_responses.go +++ b/client/swagger/http/preparation/explore_preparation_responses.go @@ -7,6 +7,7 @@ package preparation import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ExplorePreparationReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *ExplorePreparationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ExplorePreparationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewExplorePreparationOK() @@ -110,7 +111,7 @@ func (o *ExplorePreparationOK) readResponse(response runtime.ClientResponse, con o.Payload = new(models.DataprepExploreResult) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *ExplorePreparationBadRequest) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *ExplorePreparationInternalServerError) readResponse(response runtime.Cl o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/preparation/get_preparation_status_responses.go b/client/swagger/http/preparation/get_preparation_status_responses.go index aada5afe..5256ab3a 100644 --- a/client/swagger/http/preparation/get_preparation_status_responses.go +++ b/client/swagger/http/preparation/get_preparation_status_responses.go @@ -7,6 +7,7 @@ package preparation import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type GetPreparationStatusReader struct { } // ReadResponse reads a server response into the received o. -func (o *GetPreparationStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *GetPreparationStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewGetPreparationStatusOK() @@ -108,7 +109,7 @@ func (o *GetPreparationStatusOK) GetPayload() []*models.JobSourceStatus { func (o *GetPreparationStatusOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *GetPreparationStatusBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *GetPreparationStatusInternalServerError) readResponse(response runtime. 
o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/preparation/list_preparations_responses.go b/client/swagger/http/preparation/list_preparations_responses.go index 8be38748..9d416a59 100644 --- a/client/swagger/http/preparation/list_preparations_responses.go +++ b/client/swagger/http/preparation/list_preparations_responses.go @@ -7,6 +7,7 @@ package preparation import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ListPreparationsReader struct { } // ReadResponse reads a server response into the received o. -func (o *ListPreparationsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ListPreparationsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewListPreparationsOK() @@ -108,7 +109,7 @@ func (o *ListPreparationsOK) GetPayload() []*models.ModelPreparation { func (o *ListPreparationsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *ListPreparationsBadRequest) readResponse(response runtime.ClientRespons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *ListPreparationsInternalServerError) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/preparation/preparation_client.go b/client/swagger/http/preparation/preparation_client.go index 27050713..1545cb08 100644 --- a/client/swagger/http/preparation/preparation_client.go +++ b/client/swagger/http/preparation/preparation_client.go @@ -81,7 +81,7 @@ type ClientService interface { AddOutputStorage attaches an output storage with a preparation */ func (a *Client) AddOutputStorage(params *AddOutputStorageParams, opts ...ClientOption) (*AddOutputStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewAddOutputStorageParams() } @@ -100,17 +100,22 @@ func (a *Client) AddOutputStorage(params *AddOutputStorageParams, opts ...Client for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*AddOutputStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. 
+ // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for AddOutputStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -119,7 +124,7 @@ func (a *Client) AddOutputStorage(params *AddOutputStorageParams, opts ...Client AddSourceStorage attaches a source storage with a preparation */ func (a *Client) AddSourceStorage(params *AddSourceStorageParams, opts ...ClientOption) (*AddSourceStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewAddSourceStorageParams() } @@ -138,17 +143,22 @@ func (a *Client) AddSourceStorage(params *AddSourceStorageParams, opts ...Client for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*AddSourceStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for AddSourceStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -157,7 +167,7 @@ func (a *Client) AddSourceStorage(params *AddSourceStorageParams, opts ...Client CreatePreparation creates a new preparation */ func (a *Client) CreatePreparation(params *CreatePreparationParams, opts ...ClientOption) (*CreatePreparationOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreatePreparationParams() } @@ -176,17 +186,22 @@ func (a *Client) CreatePreparation(params *CreatePreparationParams, opts ...Clie for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreatePreparationOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreatePreparation: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -195,7 +210,7 @@ func (a *Client) CreatePreparation(params *CreatePreparationParams, opts ...Clie ExplorePreparation explores a directory in a prepared source storage */ func (a *Client) ExplorePreparation(params *ExplorePreparationParams, opts ...ClientOption) (*ExplorePreparationOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewExplorePreparationParams() } @@ -214,17 +229,22 @@ func (a *Client) ExplorePreparation(params *ExplorePreparationParams, opts ...Cl for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ExplorePreparationOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ExplorePreparation: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -233,7 +253,7 @@ func (a *Client) ExplorePreparation(params *ExplorePreparationParams, opts ...Cl GetPreparationStatus gets the status of a preparation */ func (a *Client) GetPreparationStatus(params *GetPreparationStatusParams, opts ...ClientOption) (*GetPreparationStatusOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewGetPreparationStatusParams() } @@ -252,17 +272,22 @@ func (a *Client) GetPreparationStatus(params *GetPreparationStatusParams, opts . for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*GetPreparationStatusOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for GetPreparationStatus: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -271,7 +296,7 @@ func (a *Client) GetPreparationStatus(params *GetPreparationStatusParams, opts . 
ListPreparations lists all preparations */ func (a *Client) ListPreparations(params *ListPreparationsParams, opts ...ClientOption) (*ListPreparationsOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewListPreparationsParams() } @@ -290,17 +315,22 @@ func (a *Client) ListPreparations(params *ListPreparationsParams, opts ...Client for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ListPreparationsOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ListPreparations: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -309,7 +339,7 @@ func (a *Client) ListPreparations(params *ListPreparationsParams, opts ...Client RemoveOutputStorage detaches an output storage from a preparation */ func (a *Client) RemoveOutputStorage(params *RemoveOutputStorageParams, opts ...ClientOption) (*RemoveOutputStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewRemoveOutputStorageParams() } @@ -328,17 +358,22 @@ func (a *Client) RemoveOutputStorage(params *RemoveOutputStorageParams, opts ... for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*RemoveOutputStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for RemoveOutputStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -347,7 +382,7 @@ func (a *Client) RemoveOutputStorage(params *RemoveOutputStorageParams, opts ... 
RemovePreparation removes a preparation */ func (a *Client) RemovePreparation(params *RemovePreparationParams, opts ...ClientOption) (*RemovePreparationNoContent, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewRemovePreparationParams() } @@ -366,17 +401,22 @@ func (a *Client) RemovePreparation(params *RemovePreparationParams, opts ...Clie for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*RemovePreparationNoContent) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for RemovePreparation: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -385,7 +425,7 @@ func (a *Client) RemovePreparation(params *RemovePreparationParams, opts ...Clie RenamePreparation renames a preparation */ func (a *Client) RenamePreparation(params *RenamePreparationParams, opts ...ClientOption) (*RenamePreparationOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewRenamePreparationParams() } @@ -404,17 +444,22 @@ func (a *Client) RenamePreparation(params *RenamePreparationParams, opts ...Clie for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*RenamePreparationOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for RenamePreparation: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } diff --git a/client/swagger/http/preparation/remove_output_storage_responses.go b/client/swagger/http/preparation/remove_output_storage_responses.go index dc51b314..34683d41 100644 --- a/client/swagger/http/preparation/remove_output_storage_responses.go +++ b/client/swagger/http/preparation/remove_output_storage_responses.go @@ -7,6 +7,7 @@ package preparation import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type RemoveOutputStorageReader struct { } // ReadResponse reads a server response into the received o. 
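Since every operation group keeps exposing its methods behind a ClientService interface (AddOutputStorage through RenamePreparation above), application code can accept the interface and swap in a hand-written fake under test. A minimal sketch using only the ListPreparations signature visible in this diff:

package prepquery

import (
	"github.com/data-preservation-programs/singularity/client/swagger/http/preparation"
)

// countPreparations depends only on preparation.ClientService, so tests can
// supply a fake implementation instead of an HTTP-backed client.
func countPreparations(svc preparation.ClientService) (int, error) {
	ok, err := svc.ListPreparations(preparation.NewListPreparationsParams())
	if err != nil {
		return 0, err
	}
	return len(ok.GetPayload()), nil
}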
-func (o *RemoveOutputStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *RemoveOutputStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewRemoveOutputStorageOK() @@ -110,7 +111,7 @@ func (o *RemoveOutputStorageOK) readResponse(response runtime.ClientResponse, co o.Payload = new(models.ModelPreparation) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *RemoveOutputStorageBadRequest) readResponse(response runtime.ClientResp o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *RemoveOutputStorageInternalServerError) readResponse(response runtime.C o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/preparation/remove_preparation_responses.go b/client/swagger/http/preparation/remove_preparation_responses.go index 5b952da2..3172cb37 100644 --- a/client/swagger/http/preparation/remove_preparation_responses.go +++ b/client/swagger/http/preparation/remove_preparation_responses.go @@ -7,6 +7,7 @@ package preparation import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type RemovePreparationReader struct { } // ReadResponse reads a server response into the received o. -func (o *RemovePreparationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *RemovePreparationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 204: result := NewRemovePreparationNoContent() @@ -166,7 +167,7 @@ func (o *RemovePreparationBadRequest) readResponse(response runtime.ClientRespon o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -236,7 +237,7 @@ func (o *RemovePreparationInternalServerError) readResponse(response runtime.Cli o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/preparation/rename_preparation_responses.go b/client/swagger/http/preparation/rename_preparation_responses.go index 87aa62db..d6f35bc5 100644 --- a/client/swagger/http/preparation/rename_preparation_responses.go +++ b/client/swagger/http/preparation/rename_preparation_responses.go @@ -7,6 +7,7 @@ package preparation import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type RenamePreparationReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *RenamePreparationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *RenamePreparationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewRenamePreparationOK() @@ -110,7 +111,7 @@ func (o *RenamePreparationOK) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.ModelPreparation) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *RenamePreparationBadRequest) readResponse(response runtime.ClientRespon o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *RenamePreparationInternalServerError) readResponse(response runtime.Cli o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/singularity_api_client.go b/client/swagger/http/singularity_api_client.go index 68172c79..655348ef 100644 --- a/client/swagger/http/singularity_api_client.go +++ b/client/swagger/http/singularity_api_client.go @@ -28,7 +28,7 @@ var Default = NewHTTPClient(nil) const ( // DefaultHost is the default Host // found in Meta (info) section of spec file - DefaultHost string = "localhost:9090" + DefaultHost string = "localhost" // DefaultBasePath is the default BasePath // found in Meta (info) section of spec file DefaultBasePath string = "/api" diff --git a/client/swagger/http/storage/create_acd_storage_parameters.go b/client/swagger/http/storage/create_acd_storage_parameters.go deleted file mode 100644 index c2b7086f..00000000 --- a/client/swagger/http/storage/create_acd_storage_parameters.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package storage - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/data-preservation-programs/singularity/client/swagger/models" -) - -// NewCreateAcdStorageParams creates a new CreateAcdStorageParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewCreateAcdStorageParams() *CreateAcdStorageParams { - return &CreateAcdStorageParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewCreateAcdStorageParamsWithTimeout creates a new CreateAcdStorageParams object -// with the ability to set a timeout on a request. 
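The singularity_api_client.go hunk above changes DefaultHost from "localhost:9090" to plain "localhost", so callers that relied on the baked-in port now have to provide the full address themselves. A sketch of the two usual ways to do that with a go-swagger generated client; the DefaultTransportConfig, NewHTTPClientWithConfig and DefaultSchemes helpers follow go-swagger's standard generated layout and are assumed to be present in the regenerated root package:

package main

import (
	"fmt"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	apiclient "github.com/data-preservation-programs/singularity/client/swagger/http"
)

func main() {
	// Option 1: reuse the generated defaults and override only the host.
	cfg := apiclient.DefaultTransportConfig().WithHost("localhost:9090")
	c1 := apiclient.NewHTTPClientWithConfig(strfmt.Default, cfg)

	// Option 2: build the runtime transport explicitly.
	transport := httptransport.New("localhost:9090", apiclient.DefaultBasePath, apiclient.DefaultSchemes)
	c2 := apiclient.New(transport, strfmt.Default)

	fmt.Printf("clients ready: %T %T\n", c1, c2)
}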
-func NewCreateAcdStorageParamsWithTimeout(timeout time.Duration) *CreateAcdStorageParams { - return &CreateAcdStorageParams{ - timeout: timeout, - } -} - -// NewCreateAcdStorageParamsWithContext creates a new CreateAcdStorageParams object -// with the ability to set a context for a request. -func NewCreateAcdStorageParamsWithContext(ctx context.Context) *CreateAcdStorageParams { - return &CreateAcdStorageParams{ - Context: ctx, - } -} - -// NewCreateAcdStorageParamsWithHTTPClient creates a new CreateAcdStorageParams object -// with the ability to set a custom HTTPClient for a request. -func NewCreateAcdStorageParamsWithHTTPClient(client *http.Client) *CreateAcdStorageParams { - return &CreateAcdStorageParams{ - HTTPClient: client, - } -} - -/* -CreateAcdStorageParams contains all the parameters to send to the API endpoint - - for the create acd storage operation. - - Typically these are written to a http.Request. -*/ -type CreateAcdStorageParams struct { - - /* Request. - - Request body - */ - Request *models.StorageCreateAcdStorageRequest - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the create acd storage params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *CreateAcdStorageParams) WithDefaults() *CreateAcdStorageParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the create acd storage params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *CreateAcdStorageParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the create acd storage params -func (o *CreateAcdStorageParams) WithTimeout(timeout time.Duration) *CreateAcdStorageParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the create acd storage params -func (o *CreateAcdStorageParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the create acd storage params -func (o *CreateAcdStorageParams) WithContext(ctx context.Context) *CreateAcdStorageParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the create acd storage params -func (o *CreateAcdStorageParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the create acd storage params -func (o *CreateAcdStorageParams) WithHTTPClient(client *http.Client) *CreateAcdStorageParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the create acd storage params -func (o *CreateAcdStorageParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithRequest adds the request to the create acd storage params -func (o *CreateAcdStorageParams) WithRequest(request *models.StorageCreateAcdStorageRequest) *CreateAcdStorageParams { - o.SetRequest(request) - return o -} - -// SetRequest adds the request to the create acd storage params -func (o *CreateAcdStorageParams) SetRequest(request *models.StorageCreateAcdStorageRequest) { - o.Request = request -} - -// WriteToRequest writes these params to a swagger request -func (o *CreateAcdStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Request != nil { - if err := r.SetBodyParam(o.Request); err != nil { - return err - } - } - - if len(res) > 0 { 
- return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/client/swagger/http/storage/create_acd_storage_responses.go b/client/swagger/http/storage/create_acd_storage_responses.go deleted file mode 100644 index 2a38b23b..00000000 --- a/client/swagger/http/storage/create_acd_storage_responses.go +++ /dev/null @@ -1,258 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package storage - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/data-preservation-programs/singularity/client/swagger/models" -) - -// CreateAcdStorageReader is a Reader for the CreateAcdStorage structure. -type CreateAcdStorageReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *CreateAcdStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewCreateAcdStorageOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewCreateAcdStorageBadRequest() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewCreateAcdStorageInternalServerError() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("[POST /storage/acd] CreateAcdStorage", response, response.Code()) - } -} - -// NewCreateAcdStorageOK creates a CreateAcdStorageOK with default headers values -func NewCreateAcdStorageOK() *CreateAcdStorageOK { - return &CreateAcdStorageOK{} -} - -/* -CreateAcdStorageOK describes a response with status code 200, with default header values. 
- -OK -*/ -type CreateAcdStorageOK struct { - Payload *models.ModelStorage -} - -// IsSuccess returns true when this create acd storage o k response has a 2xx status code -func (o *CreateAcdStorageOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this create acd storage o k response has a 3xx status code -func (o *CreateAcdStorageOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this create acd storage o k response has a 4xx status code -func (o *CreateAcdStorageOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this create acd storage o k response has a 5xx status code -func (o *CreateAcdStorageOK) IsServerError() bool { - return false -} - -// IsCode returns true when this create acd storage o k response a status code equal to that given -func (o *CreateAcdStorageOK) IsCode(code int) bool { - return code == 200 -} - -// Code gets the status code for the create acd storage o k response -func (o *CreateAcdStorageOK) Code() int { - return 200 -} - -func (o *CreateAcdStorageOK) Error() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/acd][%d] createAcdStorageOK %s", 200, payload) -} - -func (o *CreateAcdStorageOK) String() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/acd][%d] createAcdStorageOK %s", 200, payload) -} - -func (o *CreateAcdStorageOK) GetPayload() *models.ModelStorage { - return o.Payload -} - -func (o *CreateAcdStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.ModelStorage) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewCreateAcdStorageBadRequest creates a CreateAcdStorageBadRequest with default headers values -func NewCreateAcdStorageBadRequest() *CreateAcdStorageBadRequest { - return &CreateAcdStorageBadRequest{} -} - -/* -CreateAcdStorageBadRequest describes a response with status code 400, with default header values. 
- -Bad Request -*/ -type CreateAcdStorageBadRequest struct { - Payload *models.APIHTTPError -} - -// IsSuccess returns true when this create acd storage bad request response has a 2xx status code -func (o *CreateAcdStorageBadRequest) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this create acd storage bad request response has a 3xx status code -func (o *CreateAcdStorageBadRequest) IsRedirect() bool { - return false -} - -// IsClientError returns true when this create acd storage bad request response has a 4xx status code -func (o *CreateAcdStorageBadRequest) IsClientError() bool { - return true -} - -// IsServerError returns true when this create acd storage bad request response has a 5xx status code -func (o *CreateAcdStorageBadRequest) IsServerError() bool { - return false -} - -// IsCode returns true when this create acd storage bad request response a status code equal to that given -func (o *CreateAcdStorageBadRequest) IsCode(code int) bool { - return code == 400 -} - -// Code gets the status code for the create acd storage bad request response -func (o *CreateAcdStorageBadRequest) Code() int { - return 400 -} - -func (o *CreateAcdStorageBadRequest) Error() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/acd][%d] createAcdStorageBadRequest %s", 400, payload) -} - -func (o *CreateAcdStorageBadRequest) String() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/acd][%d] createAcdStorageBadRequest %s", 400, payload) -} - -func (o *CreateAcdStorageBadRequest) GetPayload() *models.APIHTTPError { - return o.Payload -} - -func (o *CreateAcdStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.APIHTTPError) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewCreateAcdStorageInternalServerError creates a CreateAcdStorageInternalServerError with default headers values -func NewCreateAcdStorageInternalServerError() *CreateAcdStorageInternalServerError { - return &CreateAcdStorageInternalServerError{} -} - -/* -CreateAcdStorageInternalServerError describes a response with status code 500, with default header values. 
- -Internal Server Error -*/ -type CreateAcdStorageInternalServerError struct { - Payload *models.APIHTTPError -} - -// IsSuccess returns true when this create acd storage internal server error response has a 2xx status code -func (o *CreateAcdStorageInternalServerError) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this create acd storage internal server error response has a 3xx status code -func (o *CreateAcdStorageInternalServerError) IsRedirect() bool { - return false -} - -// IsClientError returns true when this create acd storage internal server error response has a 4xx status code -func (o *CreateAcdStorageInternalServerError) IsClientError() bool { - return false -} - -// IsServerError returns true when this create acd storage internal server error response has a 5xx status code -func (o *CreateAcdStorageInternalServerError) IsServerError() bool { - return true -} - -// IsCode returns true when this create acd storage internal server error response a status code equal to that given -func (o *CreateAcdStorageInternalServerError) IsCode(code int) bool { - return code == 500 -} - -// Code gets the status code for the create acd storage internal server error response -func (o *CreateAcdStorageInternalServerError) Code() int { - return 500 -} - -func (o *CreateAcdStorageInternalServerError) Error() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/acd][%d] createAcdStorageInternalServerError %s", 500, payload) -} - -func (o *CreateAcdStorageInternalServerError) String() string { - payload, _ := json.Marshal(o.Payload) - return fmt.Sprintf("[POST /storage/acd][%d] createAcdStorageInternalServerError %s", 500, payload) -} - -func (o *CreateAcdStorageInternalServerError) GetPayload() *models.APIHTTPError { - return o.Payload -} - -func (o *CreateAcdStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.APIHTTPError) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/client/swagger/http/storage/create_azureblob_storage_responses.go b/client/swagger/http/storage/create_azureblob_storage_responses.go index 452e7ece..53eefe50 100644 --- a/client/swagger/http/storage/create_azureblob_storage_responses.go +++ b/client/swagger/http/storage/create_azureblob_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateAzureblobStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateAzureblobStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateAzureblobStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateAzureblobStorageOK() @@ -110,7 +111,7 @@ func (o *CreateAzureblobStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateAzureblobStorageBadRequest) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateAzureblobStorageInternalServerError) readResponse(response runtim o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_b2_storage_responses.go b/client/swagger/http/storage/create_b2_storage_responses.go index 5fcd47eb..3301af2f 100644 --- a/client/swagger/http/storage/create_b2_storage_responses.go +++ b/client/swagger/http/storage/create_b2_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateB2StorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateB2StorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateB2StorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateB2StorageOK() @@ -110,7 +111,7 @@ func (o *CreateB2StorageOK) readResponse(response runtime.ClientResponse, consum o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateB2StorageBadRequest) readResponse(response runtime.ClientResponse o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateB2StorageInternalServerError) readResponse(response runtime.Clien o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_box_storage_responses.go b/client/swagger/http/storage/create_box_storage_responses.go index 115b0c62..4c068f4e 100644 --- a/client/swagger/http/storage/create_box_storage_responses.go +++ b/client/swagger/http/storage/create_box_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateBoxStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateBoxStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateBoxStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateBoxStorageOK() @@ -110,7 +111,7 @@ func (o *CreateBoxStorageOK) readResponse(response runtime.ClientResponse, consu o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateBoxStorageBadRequest) readResponse(response runtime.ClientRespons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateBoxStorageInternalServerError) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_drive_storage_responses.go b/client/swagger/http/storage/create_drive_storage_responses.go index 36314238..6afc91b7 100644 --- a/client/swagger/http/storage/create_drive_storage_responses.go +++ b/client/swagger/http/storage/create_drive_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateDriveStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateDriveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateDriveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateDriveStorageOK() @@ -110,7 +111,7 @@ func (o *CreateDriveStorageOK) readResponse(response runtime.ClientResponse, con o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateDriveStorageBadRequest) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateDriveStorageInternalServerError) readResponse(response runtime.Cl o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_dropbox_storage_responses.go b/client/swagger/http/storage/create_dropbox_storage_responses.go index 3bbd61c5..3263db39 100644 --- a/client/swagger/http/storage/create_dropbox_storage_responses.go +++ b/client/swagger/http/storage/create_dropbox_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateDropboxStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateDropboxStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateDropboxStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateDropboxStorageOK() @@ -110,7 +111,7 @@ func (o *CreateDropboxStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateDropboxStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateDropboxStorageInternalServerError) readResponse(response runtime. 
o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_fichier_storage_responses.go b/client/swagger/http/storage/create_fichier_storage_responses.go index d2f467ba..cf7dbbfc 100644 --- a/client/swagger/http/storage/create_fichier_storage_responses.go +++ b/client/swagger/http/storage/create_fichier_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateFichierStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateFichierStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateFichierStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateFichierStorageOK() @@ -110,7 +111,7 @@ func (o *CreateFichierStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateFichierStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateFichierStorageInternalServerError) readResponse(response runtime. o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_filefabric_storage_responses.go b/client/swagger/http/storage/create_filefabric_storage_responses.go index c0fcdf18..c1b4b894 100644 --- a/client/swagger/http/storage/create_filefabric_storage_responses.go +++ b/client/swagger/http/storage/create_filefabric_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateFilefabricStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateFilefabricStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateFilefabricStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateFilefabricStorageOK() @@ -110,7 +111,7 @@ func (o *CreateFilefabricStorageOK) readResponse(response runtime.ClientResponse o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateFilefabricStorageBadRequest) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateFilefabricStorageInternalServerError) readResponse(response runti o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_ftp_storage_responses.go b/client/swagger/http/storage/create_ftp_storage_responses.go index 7c82c08c..a4ea8e9c 100644 --- a/client/swagger/http/storage/create_ftp_storage_responses.go +++ b/client/swagger/http/storage/create_ftp_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateFtpStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateFtpStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateFtpStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateFtpStorageOK() @@ -110,7 +111,7 @@ func (o *CreateFtpStorageOK) readResponse(response runtime.ClientResponse, consu o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateFtpStorageBadRequest) readResponse(response runtime.ClientRespons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateFtpStorageInternalServerError) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_gcs_storage_responses.go b/client/swagger/http/storage/create_gcs_storage_responses.go index a0130e8d..2383eed1 100644 --- a/client/swagger/http/storage/create_gcs_storage_responses.go +++ b/client/swagger/http/storage/create_gcs_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateGcsStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateGcsStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateGcsStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateGcsStorageOK() @@ -110,7 +111,7 @@ func (o *CreateGcsStorageOK) readResponse(response runtime.ClientResponse, consu o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateGcsStorageBadRequest) readResponse(response runtime.ClientRespons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateGcsStorageInternalServerError) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_gphotos_storage_responses.go b/client/swagger/http/storage/create_gphotos_storage_responses.go index e59d02f2..d92284d2 100644 --- a/client/swagger/http/storage/create_gphotos_storage_responses.go +++ b/client/swagger/http/storage/create_gphotos_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateGphotosStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateGphotosStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateGphotosStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateGphotosStorageOK() @@ -110,7 +111,7 @@ func (o *CreateGphotosStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateGphotosStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateGphotosStorageInternalServerError) readResponse(response runtime. 
o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_hdfs_storage_responses.go b/client/swagger/http/storage/create_hdfs_storage_responses.go index 687fa236..958ec45f 100644 --- a/client/swagger/http/storage/create_hdfs_storage_responses.go +++ b/client/swagger/http/storage/create_hdfs_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateHdfsStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateHdfsStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateHdfsStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateHdfsStorageOK() @@ -110,7 +111,7 @@ func (o *CreateHdfsStorageOK) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateHdfsStorageBadRequest) readResponse(response runtime.ClientRespon o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateHdfsStorageInternalServerError) readResponse(response runtime.Cli o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_hidrive_storage_responses.go b/client/swagger/http/storage/create_hidrive_storage_responses.go index 75df0b9f..1089105b 100644 --- a/client/swagger/http/storage/create_hidrive_storage_responses.go +++ b/client/swagger/http/storage/create_hidrive_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateHidriveStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateHidriveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateHidriveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateHidriveStorageOK() @@ -110,7 +111,7 @@ func (o *CreateHidriveStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateHidriveStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateHidriveStorageInternalServerError) readResponse(response runtime. o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_http_storage_responses.go b/client/swagger/http/storage/create_http_storage_responses.go index 868ac947..0db91488 100644 --- a/client/swagger/http/storage/create_http_storage_responses.go +++ b/client/swagger/http/storage/create_http_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateHTTPStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateHTTPStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateHTTPStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateHTTPStorageOK() @@ -110,7 +111,7 @@ func (o *CreateHTTPStorageOK) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateHTTPStorageBadRequest) readResponse(response runtime.ClientRespon o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateHTTPStorageInternalServerError) readResponse(response runtime.Cli o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_internetarchive_storage_responses.go b/client/swagger/http/storage/create_internetarchive_storage_responses.go index 83f08045..e255622d 100644 --- a/client/swagger/http/storage/create_internetarchive_storage_responses.go +++ b/client/swagger/http/storage/create_internetarchive_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateInternetarchiveStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateInternetarchiveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateInternetarchiveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateInternetarchiveStorageOK() @@ -110,7 +111,7 @@ func (o *CreateInternetarchiveStorageOK) readResponse(response runtime.ClientRes o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateInternetarchiveStorageBadRequest) readResponse(response runtime.C o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateInternetarchiveStorageInternalServerError) readResponse(response o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_jottacloud_storage_responses.go b/client/swagger/http/storage/create_jottacloud_storage_responses.go index 0d1055dd..e5ceac96 100644 --- a/client/swagger/http/storage/create_jottacloud_storage_responses.go +++ b/client/swagger/http/storage/create_jottacloud_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateJottacloudStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateJottacloudStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateJottacloudStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateJottacloudStorageOK() @@ -110,7 +111,7 @@ func (o *CreateJottacloudStorageOK) readResponse(response runtime.ClientResponse o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateJottacloudStorageBadRequest) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateJottacloudStorageInternalServerError) readResponse(response runti o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_koofr_digistorage_storage_responses.go b/client/swagger/http/storage/create_koofr_digistorage_storage_responses.go index 2b9dbe61..d056d106 100644 --- a/client/swagger/http/storage/create_koofr_digistorage_storage_responses.go +++ b/client/swagger/http/storage/create_koofr_digistorage_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateKoofrDigistorageStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateKoofrDigistorageStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateKoofrDigistorageStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateKoofrDigistorageStorageOK() @@ -110,7 +111,7 @@ func (o *CreateKoofrDigistorageStorageOK) readResponse(response runtime.ClientRe o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateKoofrDigistorageStorageBadRequest) readResponse(response runtime. 
o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateKoofrDigistorageStorageInternalServerError) readResponse(response o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_koofr_koofr_storage_responses.go b/client/swagger/http/storage/create_koofr_koofr_storage_responses.go index 336bb1d4..7c8e8dba 100644 --- a/client/swagger/http/storage/create_koofr_koofr_storage_responses.go +++ b/client/swagger/http/storage/create_koofr_koofr_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateKoofrKoofrStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateKoofrKoofrStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateKoofrKoofrStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateKoofrKoofrStorageOK() @@ -110,7 +111,7 @@ func (o *CreateKoofrKoofrStorageOK) readResponse(response runtime.ClientResponse o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateKoofrKoofrStorageBadRequest) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateKoofrKoofrStorageInternalServerError) readResponse(response runti o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_koofr_other_storage_responses.go b/client/swagger/http/storage/create_koofr_other_storage_responses.go index 62a9b09a..4af140ce 100644 --- a/client/swagger/http/storage/create_koofr_other_storage_responses.go +++ b/client/swagger/http/storage/create_koofr_other_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateKoofrOtherStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateKoofrOtherStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateKoofrOtherStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateKoofrOtherStorageOK() @@ -110,7 +111,7 @@ func (o *CreateKoofrOtherStorageOK) readResponse(response runtime.ClientResponse o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateKoofrOtherStorageBadRequest) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateKoofrOtherStorageInternalServerError) readResponse(response runti o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_local_storage_responses.go b/client/swagger/http/storage/create_local_storage_responses.go index bd3d9283..d3d59868 100644 --- a/client/swagger/http/storage/create_local_storage_responses.go +++ b/client/swagger/http/storage/create_local_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateLocalStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateLocalStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateLocalStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateLocalStorageOK() @@ -110,7 +111,7 @@ func (o *CreateLocalStorageOK) readResponse(response runtime.ClientResponse, con o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateLocalStorageBadRequest) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateLocalStorageInternalServerError) readResponse(response runtime.Cl o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_mailru_storage_responses.go b/client/swagger/http/storage/create_mailru_storage_responses.go index 3db5153a..11218f39 100644 --- a/client/swagger/http/storage/create_mailru_storage_responses.go +++ b/client/swagger/http/storage/create_mailru_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateMailruStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateMailruStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateMailruStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateMailruStorageOK() @@ -110,7 +111,7 @@ func (o *CreateMailruStorageOK) readResponse(response runtime.ClientResponse, co o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateMailruStorageBadRequest) readResponse(response runtime.ClientResp o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateMailruStorageInternalServerError) readResponse(response runtime.C o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_mega_storage_responses.go b/client/swagger/http/storage/create_mega_storage_responses.go index 614bf0f1..024f2618 100644 --- a/client/swagger/http/storage/create_mega_storage_responses.go +++ b/client/swagger/http/storage/create_mega_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateMegaStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateMegaStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateMegaStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateMegaStorageOK() @@ -110,7 +111,7 @@ func (o *CreateMegaStorageOK) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateMegaStorageBadRequest) readResponse(response runtime.ClientRespon o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateMegaStorageInternalServerError) readResponse(response runtime.Cli o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_netstorage_storage_responses.go b/client/swagger/http/storage/create_netstorage_storage_responses.go index 12c6a2be..e4ed2984 100644 --- a/client/swagger/http/storage/create_netstorage_storage_responses.go +++ b/client/swagger/http/storage/create_netstorage_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateNetstorageStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateNetstorageStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateNetstorageStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateNetstorageStorageOK() @@ -110,7 +111,7 @@ func (o *CreateNetstorageStorageOK) readResponse(response runtime.ClientResponse o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateNetstorageStorageBadRequest) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateNetstorageStorageInternalServerError) readResponse(response runti o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_onedrive_storage_responses.go b/client/swagger/http/storage/create_onedrive_storage_responses.go index 57bf0446..aae33fbd 100644 --- a/client/swagger/http/storage/create_onedrive_storage_responses.go +++ b/client/swagger/http/storage/create_onedrive_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateOnedriveStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateOnedriveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateOnedriveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateOnedriveStorageOK() @@ -110,7 +111,7 @@ func (o *CreateOnedriveStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateOnedriveStorageBadRequest) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateOnedriveStorageInternalServerError) readResponse(response runtime o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_oos_env_auth_storage_responses.go b/client/swagger/http/storage/create_oos_env_auth_storage_responses.go index 1ba598a5..10773570 100644 --- a/client/swagger/http/storage/create_oos_env_auth_storage_responses.go +++ b/client/swagger/http/storage/create_oos_env_auth_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateOosEnvAuthStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateOosEnvAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateOosEnvAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateOosEnvAuthStorageOK() @@ -110,7 +111,7 @@ func (o *CreateOosEnvAuthStorageOK) readResponse(response runtime.ClientResponse o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateOosEnvAuthStorageBadRequest) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateOosEnvAuthStorageInternalServerError) readResponse(response runti o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_oos_instance_principal_auth_storage_responses.go b/client/swagger/http/storage/create_oos_instance_principal_auth_storage_responses.go index e65c70a4..fb28c2ec 100644 --- a/client/swagger/http/storage/create_oos_instance_principal_auth_storage_responses.go +++ b/client/swagger/http/storage/create_oos_instance_principal_auth_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateOosInstancePrincipalAuthStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateOosInstancePrincipalAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateOosInstancePrincipalAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateOosInstancePrincipalAuthStorageOK() @@ -110,7 +111,7 @@ func (o *CreateOosInstancePrincipalAuthStorageOK) readResponse(response runtime. 
o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateOosInstancePrincipalAuthStorageBadRequest) readResponse(response o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateOosInstancePrincipalAuthStorageInternalServerError) readResponse( o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_oos_no_auth_storage_responses.go b/client/swagger/http/storage/create_oos_no_auth_storage_responses.go index 0f5c0d3b..9d4f35db 100644 --- a/client/swagger/http/storage/create_oos_no_auth_storage_responses.go +++ b/client/swagger/http/storage/create_oos_no_auth_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateOosNoAuthStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateOosNoAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateOosNoAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateOosNoAuthStorageOK() @@ -110,7 +111,7 @@ func (o *CreateOosNoAuthStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateOosNoAuthStorageBadRequest) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateOosNoAuthStorageInternalServerError) readResponse(response runtim o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_oos_resource_principal_auth_storage_responses.go b/client/swagger/http/storage/create_oos_resource_principal_auth_storage_responses.go index 9cf1b4e0..e6bd22b7 100644 --- a/client/swagger/http/storage/create_oos_resource_principal_auth_storage_responses.go +++ b/client/swagger/http/storage/create_oos_resource_principal_auth_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateOosResourcePrincipalAuthStorageReader struct { } // ReadResponse reads a server response into the 
received o. -func (o *CreateOosResourcePrincipalAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateOosResourcePrincipalAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateOosResourcePrincipalAuthStorageOK() @@ -110,7 +111,7 @@ func (o *CreateOosResourcePrincipalAuthStorageOK) readResponse(response runtime. o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateOosResourcePrincipalAuthStorageBadRequest) readResponse(response o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateOosResourcePrincipalAuthStorageInternalServerError) readResponse( o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_oos_user_principal_auth_storage_responses.go b/client/swagger/http/storage/create_oos_user_principal_auth_storage_responses.go index fdfd4206..ea87a420 100644 --- a/client/swagger/http/storage/create_oos_user_principal_auth_storage_responses.go +++ b/client/swagger/http/storage/create_oos_user_principal_auth_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateOosUserPrincipalAuthStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateOosUserPrincipalAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateOosUserPrincipalAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateOosUserPrincipalAuthStorageOK() @@ -110,7 +111,7 @@ func (o *CreateOosUserPrincipalAuthStorageOK) readResponse(response runtime.Clie o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateOosUserPrincipalAuthStorageBadRequest) readResponse(response runt o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateOosUserPrincipalAuthStorageInternalServerError) readResponse(resp o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_oos_workload_identity_auth_storage_parameters.go b/client/swagger/http/storage/create_oos_workload_identity_auth_storage_parameters.go new file mode 100644 index 00000000..4a069db9 --- /dev/null +++ b/client/swagger/http/storage/create_oos_workload_identity_auth_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateOosWorkloadIdentityAuthStorageParams creates a new CreateOosWorkloadIdentityAuthStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateOosWorkloadIdentityAuthStorageParams() *CreateOosWorkloadIdentityAuthStorageParams { + return &CreateOosWorkloadIdentityAuthStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateOosWorkloadIdentityAuthStorageParamsWithTimeout creates a new CreateOosWorkloadIdentityAuthStorageParams object +// with the ability to set a timeout on a request. +func NewCreateOosWorkloadIdentityAuthStorageParamsWithTimeout(timeout time.Duration) *CreateOosWorkloadIdentityAuthStorageParams { + return &CreateOosWorkloadIdentityAuthStorageParams{ + timeout: timeout, + } +} + +// NewCreateOosWorkloadIdentityAuthStorageParamsWithContext creates a new CreateOosWorkloadIdentityAuthStorageParams object +// with the ability to set a context for a request. 
+func NewCreateOosWorkloadIdentityAuthStorageParamsWithContext(ctx context.Context) *CreateOosWorkloadIdentityAuthStorageParams { + return &CreateOosWorkloadIdentityAuthStorageParams{ + Context: ctx, + } +} + +// NewCreateOosWorkloadIdentityAuthStorageParamsWithHTTPClient creates a new CreateOosWorkloadIdentityAuthStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateOosWorkloadIdentityAuthStorageParamsWithHTTPClient(client *http.Client) *CreateOosWorkloadIdentityAuthStorageParams { + return &CreateOosWorkloadIdentityAuthStorageParams{ + HTTPClient: client, + } +} + +/* +CreateOosWorkloadIdentityAuthStorageParams contains all the parameters to send to the API endpoint + + for the create oos workload identity auth storage operation. + + Typically these are written to a http.Request. +*/ +type CreateOosWorkloadIdentityAuthStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateOosWorkloadIdentityAuthStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create oos workload identity auth storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateOosWorkloadIdentityAuthStorageParams) WithDefaults() *CreateOosWorkloadIdentityAuthStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create oos workload identity auth storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateOosWorkloadIdentityAuthStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create oos workload identity auth storage params +func (o *CreateOosWorkloadIdentityAuthStorageParams) WithTimeout(timeout time.Duration) *CreateOosWorkloadIdentityAuthStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create oos workload identity auth storage params +func (o *CreateOosWorkloadIdentityAuthStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create oos workload identity auth storage params +func (o *CreateOosWorkloadIdentityAuthStorageParams) WithContext(ctx context.Context) *CreateOosWorkloadIdentityAuthStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create oos workload identity auth storage params +func (o *CreateOosWorkloadIdentityAuthStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create oos workload identity auth storage params +func (o *CreateOosWorkloadIdentityAuthStorageParams) WithHTTPClient(client *http.Client) *CreateOosWorkloadIdentityAuthStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create oos workload identity auth storage params +func (o *CreateOosWorkloadIdentityAuthStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create oos workload identity auth storage params +func (o *CreateOosWorkloadIdentityAuthStorageParams) WithRequest(request *models.StorageCreateOosWorkloadIdentityAuthStorageRequest) *CreateOosWorkloadIdentityAuthStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create oos workload identity auth storage params +func (o 
*CreateOosWorkloadIdentityAuthStorageParams) SetRequest(request *models.StorageCreateOosWorkloadIdentityAuthStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateOosWorkloadIdentityAuthStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_oos_workload_identity_auth_storage_responses.go b/client/swagger/http/storage/create_oos_workload_identity_auth_storage_responses.go new file mode 100644 index 00000000..e52d8825 --- /dev/null +++ b/client/swagger/http/storage/create_oos_workload_identity_auth_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateOosWorkloadIdentityAuthStorageReader is a Reader for the CreateOosWorkloadIdentityAuthStorage structure. +type CreateOosWorkloadIdentityAuthStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CreateOosWorkloadIdentityAuthStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateOosWorkloadIdentityAuthStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateOosWorkloadIdentityAuthStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateOosWorkloadIdentityAuthStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/oos/workload_identity_auth] CreateOosWorkload_identity_authStorage", response, response.Code()) + } +} + +// NewCreateOosWorkloadIdentityAuthStorageOK creates a CreateOosWorkloadIdentityAuthStorageOK with default headers values +func NewCreateOosWorkloadIdentityAuthStorageOK() *CreateOosWorkloadIdentityAuthStorageOK { + return &CreateOosWorkloadIdentityAuthStorageOK{} +} + +/* +CreateOosWorkloadIdentityAuthStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateOosWorkloadIdentityAuthStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create oos workload identity auth storage o k response has a 2xx status code +func (o *CreateOosWorkloadIdentityAuthStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create oos workload identity auth storage o k response has a 3xx status code +func (o *CreateOosWorkloadIdentityAuthStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create oos workload identity auth storage o k response has a 4xx status code +func (o *CreateOosWorkloadIdentityAuthStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create oos workload identity auth storage o k response has a 5xx status code +func (o *CreateOosWorkloadIdentityAuthStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create oos workload identity auth storage o k response a status code equal to that given +func (o *CreateOosWorkloadIdentityAuthStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create oos workload identity auth storage o k response +func (o *CreateOosWorkloadIdentityAuthStorageOK) Code() int { + return 200 +} + +func (o *CreateOosWorkloadIdentityAuthStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/oos/workload_identity_auth][%d] createOosWorkloadIdentityAuthStorageOK %s", 200, payload) +} + +func (o *CreateOosWorkloadIdentityAuthStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/oos/workload_identity_auth][%d] createOosWorkloadIdentityAuthStorageOK %s", 200, payload) +} + +func (o *CreateOosWorkloadIdentityAuthStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateOosWorkloadIdentityAuthStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateOosWorkloadIdentityAuthStorageBadRequest creates a CreateOosWorkloadIdentityAuthStorageBadRequest with default headers values +func NewCreateOosWorkloadIdentityAuthStorageBadRequest() *CreateOosWorkloadIdentityAuthStorageBadRequest { + return &CreateOosWorkloadIdentityAuthStorageBadRequest{} +} + +/* +CreateOosWorkloadIdentityAuthStorageBadRequest describes a response with status code 400, with default header values. 
+ +Bad Request +*/ +type CreateOosWorkloadIdentityAuthStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create oos workload identity auth storage bad request response has a 2xx status code +func (o *CreateOosWorkloadIdentityAuthStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create oos workload identity auth storage bad request response has a 3xx status code +func (o *CreateOosWorkloadIdentityAuthStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create oos workload identity auth storage bad request response has a 4xx status code +func (o *CreateOosWorkloadIdentityAuthStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create oos workload identity auth storage bad request response has a 5xx status code +func (o *CreateOosWorkloadIdentityAuthStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create oos workload identity auth storage bad request response a status code equal to that given +func (o *CreateOosWorkloadIdentityAuthStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create oos workload identity auth storage bad request response +func (o *CreateOosWorkloadIdentityAuthStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateOosWorkloadIdentityAuthStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/oos/workload_identity_auth][%d] createOosWorkloadIdentityAuthStorageBadRequest %s", 400, payload) +} + +func (o *CreateOosWorkloadIdentityAuthStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/oos/workload_identity_auth][%d] createOosWorkloadIdentityAuthStorageBadRequest %s", 400, payload) +} + +func (o *CreateOosWorkloadIdentityAuthStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateOosWorkloadIdentityAuthStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateOosWorkloadIdentityAuthStorageInternalServerError creates a CreateOosWorkloadIdentityAuthStorageInternalServerError with default headers values +func NewCreateOosWorkloadIdentityAuthStorageInternalServerError() *CreateOosWorkloadIdentityAuthStorageInternalServerError { + return &CreateOosWorkloadIdentityAuthStorageInternalServerError{} +} + +/* +CreateOosWorkloadIdentityAuthStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateOosWorkloadIdentityAuthStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create oos workload identity auth storage internal server error response has a 2xx status code +func (o *CreateOosWorkloadIdentityAuthStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create oos workload identity auth storage internal server error response has a 3xx status code +func (o *CreateOosWorkloadIdentityAuthStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create oos workload identity auth storage internal server error response has a 4xx status code +func (o *CreateOosWorkloadIdentityAuthStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create oos workload identity auth storage internal server error response has a 5xx status code +func (o *CreateOosWorkloadIdentityAuthStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create oos workload identity auth storage internal server error response a status code equal to that given +func (o *CreateOosWorkloadIdentityAuthStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create oos workload identity auth storage internal server error response +func (o *CreateOosWorkloadIdentityAuthStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateOosWorkloadIdentityAuthStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/oos/workload_identity_auth][%d] createOosWorkloadIdentityAuthStorageInternalServerError %s", 500, payload) +} + +func (o *CreateOosWorkloadIdentityAuthStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/oos/workload_identity_auth][%d] createOosWorkloadIdentityAuthStorageInternalServerError %s", 500, payload) +} + +func (o *CreateOosWorkloadIdentityAuthStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateOosWorkloadIdentityAuthStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_opendrive_storage_responses.go b/client/swagger/http/storage/create_opendrive_storage_responses.go index cb19d476..c552d8a0 100644 --- a/client/swagger/http/storage/create_opendrive_storage_responses.go +++ b/client/swagger/http/storage/create_opendrive_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateOpendriveStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateOpendriveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateOpendriveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateOpendriveStorageOK() @@ -110,7 +111,7 @@ func (o *CreateOpendriveStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateOpendriveStorageBadRequest) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateOpendriveStorageInternalServerError) readResponse(response runtim o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_pcloud_storage_responses.go b/client/swagger/http/storage/create_pcloud_storage_responses.go index 9be99aa0..492d323d 100644 --- a/client/swagger/http/storage/create_pcloud_storage_responses.go +++ b/client/swagger/http/storage/create_pcloud_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreatePcloudStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreatePcloudStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreatePcloudStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreatePcloudStorageOK() @@ -110,7 +111,7 @@ func (o *CreatePcloudStorageOK) readResponse(response runtime.ClientResponse, co o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreatePcloudStorageBadRequest) readResponse(response runtime.ClientResp o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreatePcloudStorageInternalServerError) readResponse(response runtime.C o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_premiumizeme_storage_responses.go b/client/swagger/http/storage/create_premiumizeme_storage_responses.go index 11360a02..163cd46b 100644 --- a/client/swagger/http/storage/create_premiumizeme_storage_responses.go +++ b/client/swagger/http/storage/create_premiumizeme_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreatePremiumizemeStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreatePremiumizemeStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreatePremiumizemeStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreatePremiumizemeStorageOK() @@ -110,7 +111,7 @@ func (o *CreatePremiumizemeStorageOK) readResponse(response runtime.ClientRespon o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreatePremiumizemeStorageBadRequest) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreatePremiumizemeStorageInternalServerError) readResponse(response run o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_putio_storage_responses.go b/client/swagger/http/storage/create_putio_storage_responses.go index 8eaa0971..9a843e44 100644 --- a/client/swagger/http/storage/create_putio_storage_responses.go +++ b/client/swagger/http/storage/create_putio_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreatePutioStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreatePutioStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreatePutioStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreatePutioStorageOK() @@ -110,7 +111,7 @@ func (o *CreatePutioStorageOK) readResponse(response runtime.ClientResponse, con o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreatePutioStorageBadRequest) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreatePutioStorageInternalServerError) readResponse(response runtime.Cl o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_qingstor_storage_responses.go b/client/swagger/http/storage/create_qingstor_storage_responses.go index 6650214a..5e08962a 100644 --- a/client/swagger/http/storage/create_qingstor_storage_responses.go +++ b/client/swagger/http/storage/create_qingstor_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateQingstorStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateQingstorStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateQingstorStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateQingstorStorageOK() @@ -110,7 +111,7 @@ func (o *CreateQingstorStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateQingstorStorageBadRequest) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateQingstorStorageInternalServerError) readResponse(response runtime o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_a_w_s_storage_responses.go b/client/swagger/http/storage/create_s3_a_w_s_storage_responses.go index 144b9533..b8c9bd1d 100644 --- a/client/swagger/http/storage/create_s3_a_w_s_storage_responses.go +++ b/client/swagger/http/storage/create_s3_a_w_s_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3AWSStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3AWSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3AWSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3AWSStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3AWSStorageOK) readResponse(response runtime.ClientResponse, con o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3AWSStorageBadRequest) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3AWSStorageInternalServerError) readResponse(response runtime.Cl o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_alibaba_storage_responses.go b/client/swagger/http/storage/create_s3_alibaba_storage_responses.go index 814175a8..eca00cf2 100644 --- a/client/swagger/http/storage/create_s3_alibaba_storage_responses.go +++ b/client/swagger/http/storage/create_s3_alibaba_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3AlibabaStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3AlibabaStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3AlibabaStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3AlibabaStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3AlibabaStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3AlibabaStorageBadRequest) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3AlibabaStorageInternalServerError) readResponse(response runtim o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_arvan_cloud_storage_responses.go b/client/swagger/http/storage/create_s3_arvan_cloud_storage_responses.go index 6243be95..148255e8 100644 --- a/client/swagger/http/storage/create_s3_arvan_cloud_storage_responses.go +++ b/client/swagger/http/storage/create_s3_arvan_cloud_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3ArvanCloudStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3ArvanCloudStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3ArvanCloudStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3ArvanCloudStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3ArvanCloudStorageOK) readResponse(response runtime.ClientRespon o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3ArvanCloudStorageBadRequest) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3ArvanCloudStorageInternalServerError) readResponse(response run o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_ceph_storage_responses.go b/client/swagger/http/storage/create_s3_ceph_storage_responses.go index 5a88ef95..ad726035 100644 --- a/client/swagger/http/storage/create_s3_ceph_storage_responses.go +++ b/client/swagger/http/storage/create_s3_ceph_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3CephStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3CephStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3CephStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3CephStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3CephStorageOK) readResponse(response runtime.ClientResponse, co o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3CephStorageBadRequest) readResponse(response runtime.ClientResp o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3CephStorageInternalServerError) readResponse(response runtime.C o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_china_mobile_storage_responses.go b/client/swagger/http/storage/create_s3_china_mobile_storage_responses.go index 92d1998f..3460c812 100644 --- a/client/swagger/http/storage/create_s3_china_mobile_storage_responses.go +++ b/client/swagger/http/storage/create_s3_china_mobile_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3ChinaMobileStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3ChinaMobileStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3ChinaMobileStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3ChinaMobileStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3ChinaMobileStorageOK) readResponse(response runtime.ClientRespo o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3ChinaMobileStorageBadRequest) readResponse(response runtime.Cli o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3ChinaMobileStorageInternalServerError) readResponse(response ru o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_cloudflare_storage_responses.go b/client/swagger/http/storage/create_s3_cloudflare_storage_responses.go index 39d93499..2adc9930 100644 --- a/client/swagger/http/storage/create_s3_cloudflare_storage_responses.go +++ b/client/swagger/http/storage/create_s3_cloudflare_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3CloudflareStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3CloudflareStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3CloudflareStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3CloudflareStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3CloudflareStorageOK) readResponse(response runtime.ClientRespon o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3CloudflareStorageBadRequest) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3CloudflareStorageInternalServerError) readResponse(response run o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_digital_ocean_storage_responses.go b/client/swagger/http/storage/create_s3_digital_ocean_storage_responses.go index 8964abf7..9d473c76 100644 --- a/client/swagger/http/storage/create_s3_digital_ocean_storage_responses.go +++ b/client/swagger/http/storage/create_s3_digital_ocean_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3DigitalOceanStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3DigitalOceanStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3DigitalOceanStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3DigitalOceanStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3DigitalOceanStorageOK) readResponse(response runtime.ClientResp o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3DigitalOceanStorageBadRequest) readResponse(response runtime.Cl o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3DigitalOceanStorageInternalServerError) readResponse(response r o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_dreamhost_storage_responses.go b/client/swagger/http/storage/create_s3_dreamhost_storage_responses.go index 6ce1a3a2..23170b20 100644 --- a/client/swagger/http/storage/create_s3_dreamhost_storage_responses.go +++ b/client/swagger/http/storage/create_s3_dreamhost_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3DreamhostStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3DreamhostStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3DreamhostStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3DreamhostStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3DreamhostStorageOK) readResponse(response runtime.ClientRespons o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3DreamhostStorageBadRequest) readResponse(response runtime.Clien o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3DreamhostStorageInternalServerError) readResponse(response runt o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_g_c_s_storage_parameters.go b/client/swagger/http/storage/create_s3_g_c_s_storage_parameters.go new file mode 100644 index 00000000..88b19111 --- /dev/null +++ b/client/swagger/http/storage/create_s3_g_c_s_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3GCSStorageParams creates a new CreateS3GCSStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3GCSStorageParams() *CreateS3GCSStorageParams { + return &CreateS3GCSStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3GCSStorageParamsWithTimeout creates a new CreateS3GCSStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3GCSStorageParamsWithTimeout(timeout time.Duration) *CreateS3GCSStorageParams { + return &CreateS3GCSStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3GCSStorageParamsWithContext creates a new CreateS3GCSStorageParams object +// with the ability to set a context for a request. +func NewCreateS3GCSStorageParamsWithContext(ctx context.Context) *CreateS3GCSStorageParams { + return &CreateS3GCSStorageParams{ + Context: ctx, + } +} + +// NewCreateS3GCSStorageParamsWithHTTPClient creates a new CreateS3GCSStorageParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewCreateS3GCSStorageParamsWithHTTPClient(client *http.Client) *CreateS3GCSStorageParams { + return &CreateS3GCSStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3GCSStorageParams contains all the parameters to send to the API endpoint + + for the create s3 g c s storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3GCSStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3GCSStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 g c s storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3GCSStorageParams) WithDefaults() *CreateS3GCSStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 g c s storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3GCSStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 g c s storage params +func (o *CreateS3GCSStorageParams) WithTimeout(timeout time.Duration) *CreateS3GCSStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 g c s storage params +func (o *CreateS3GCSStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 g c s storage params +func (o *CreateS3GCSStorageParams) WithContext(ctx context.Context) *CreateS3GCSStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 g c s storage params +func (o *CreateS3GCSStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 g c s storage params +func (o *CreateS3GCSStorageParams) WithHTTPClient(client *http.Client) *CreateS3GCSStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 g c s storage params +func (o *CreateS3GCSStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 g c s storage params +func (o *CreateS3GCSStorageParams) WithRequest(request *models.StorageCreateS3GCSStorageRequest) *CreateS3GCSStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 g c s storage params +func (o *CreateS3GCSStorageParams) SetRequest(request *models.StorageCreateS3GCSStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3GCSStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_g_c_s_storage_responses.go b/client/swagger/http/storage/create_s3_g_c_s_storage_responses.go new file mode 100644 index 00000000..fa5acd2a --- /dev/null +++ b/client/swagger/http/storage/create_s3_g_c_s_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3GCSStorageReader is a Reader for the CreateS3GCSStorage structure. +type CreateS3GCSStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CreateS3GCSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3GCSStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3GCSStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3GCSStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/gcs] CreateS3GCSStorage", response, response.Code()) + } +} + +// NewCreateS3GCSStorageOK creates a CreateS3GCSStorageOK with default headers values +func NewCreateS3GCSStorageOK() *CreateS3GCSStorageOK { + return &CreateS3GCSStorageOK{} +} + +/* +CreateS3GCSStorageOK describes a response with status code 200, with default header values. + +OK +*/ +type CreateS3GCSStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 g c s storage o k response has a 2xx status code +func (o *CreateS3GCSStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 g c s storage o k response has a 3xx status code +func (o *CreateS3GCSStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 g c s storage o k response has a 4xx status code +func (o *CreateS3GCSStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 g c s storage o k response has a 5xx status code +func (o *CreateS3GCSStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 g c s storage o k response a status code equal to that given +func (o *CreateS3GCSStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 g c s storage o k response +func (o *CreateS3GCSStorageOK) Code() int { + return 200 +} + +func (o *CreateS3GCSStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/gcs][%d] createS3GCSStorageOK %s", 200, payload) +} + +func (o *CreateS3GCSStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/gcs][%d] createS3GCSStorageOK %s", 200, payload) +} + +func (o *CreateS3GCSStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3GCSStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3GCSStorageBadRequest creates a 
CreateS3GCSStorageBadRequest with default headers values +func NewCreateS3GCSStorageBadRequest() *CreateS3GCSStorageBadRequest { + return &CreateS3GCSStorageBadRequest{} +} + +/* +CreateS3GCSStorageBadRequest describes a response with status code 400, with default header values. + +Bad Request +*/ +type CreateS3GCSStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 g c s storage bad request response has a 2xx status code +func (o *CreateS3GCSStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 g c s storage bad request response has a 3xx status code +func (o *CreateS3GCSStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 g c s storage bad request response has a 4xx status code +func (o *CreateS3GCSStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 g c s storage bad request response has a 5xx status code +func (o *CreateS3GCSStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 g c s storage bad request response a status code equal to that given +func (o *CreateS3GCSStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 g c s storage bad request response +func (o *CreateS3GCSStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3GCSStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/gcs][%d] createS3GCSStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3GCSStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/gcs][%d] createS3GCSStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3GCSStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3GCSStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3GCSStorageInternalServerError creates a CreateS3GCSStorageInternalServerError with default headers values +func NewCreateS3GCSStorageInternalServerError() *CreateS3GCSStorageInternalServerError { + return &CreateS3GCSStorageInternalServerError{} +} + +/* +CreateS3GCSStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3GCSStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 g c s storage internal server error response has a 2xx status code +func (o *CreateS3GCSStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 g c s storage internal server error response has a 3xx status code +func (o *CreateS3GCSStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 g c s storage internal server error response has a 4xx status code +func (o *CreateS3GCSStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 g c s storage internal server error response has a 5xx status code +func (o *CreateS3GCSStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 g c s storage internal server error response a status code equal to that given +func (o *CreateS3GCSStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 g c s storage internal server error response +func (o *CreateS3GCSStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3GCSStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/gcs][%d] createS3GCSStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3GCSStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/gcs][%d] createS3GCSStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3GCSStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3GCSStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_huawei_o_b_s_storage_responses.go b/client/swagger/http/storage/create_s3_huawei_o_b_s_storage_responses.go index d91f19f4..6b8892a7 100644 --- a/client/swagger/http/storage/create_s3_huawei_o_b_s_storage_responses.go +++ b/client/swagger/http/storage/create_s3_huawei_o_b_s_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3HuaweiOBSStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3HuaweiOBSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3HuaweiOBSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3HuaweiOBSStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3HuaweiOBSStorageOK) readResponse(response runtime.ClientRespons o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3HuaweiOBSStorageBadRequest) readResponse(response runtime.Clien o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3HuaweiOBSStorageInternalServerError) readResponse(response runt o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_i_b_m_c_o_s_storage_responses.go b/client/swagger/http/storage/create_s3_i_b_m_c_o_s_storage_responses.go index b0c94747..3e3db8e0 100644 --- a/client/swagger/http/storage/create_s3_i_b_m_c_o_s_storage_responses.go +++ b/client/swagger/http/storage/create_s3_i_b_m_c_o_s_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3IBMCOSStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3IBMCOSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3IBMCOSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3IBMCOSStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3IBMCOSStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3IBMCOSStorageBadRequest) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3IBMCOSStorageInternalServerError) readResponse(response runtime o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_i_drive_storage_responses.go b/client/swagger/http/storage/create_s3_i_drive_storage_responses.go index c2b3f234..c1b25605 100644 --- a/client/swagger/http/storage/create_s3_i_drive_storage_responses.go +++ b/client/swagger/http/storage/create_s3_i_drive_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3IDriveStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3IDriveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3IDriveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3IDriveStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3IDriveStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3IDriveStorageBadRequest) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3IDriveStorageInternalServerError) readResponse(response runtime o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_i_o_n_o_s_storage_responses.go b/client/swagger/http/storage/create_s3_i_o_n_o_s_storage_responses.go index f4d7958b..74cc6504 100644 --- a/client/swagger/http/storage/create_s3_i_o_n_o_s_storage_responses.go +++ b/client/swagger/http/storage/create_s3_i_o_n_o_s_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3IONOSStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateS3IONOSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3IONOSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3IONOSStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3IONOSStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3IONOSStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3IONOSStorageInternalServerError) readResponse(response runtime. 
o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_leviia_storage_parameters.go b/client/swagger/http/storage/create_s3_leviia_storage_parameters.go new file mode 100644 index 00000000..416a21a5 --- /dev/null +++ b/client/swagger/http/storage/create_s3_leviia_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3LeviiaStorageParams creates a new CreateS3LeviiaStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3LeviiaStorageParams() *CreateS3LeviiaStorageParams { + return &CreateS3LeviiaStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3LeviiaStorageParamsWithTimeout creates a new CreateS3LeviiaStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3LeviiaStorageParamsWithTimeout(timeout time.Duration) *CreateS3LeviiaStorageParams { + return &CreateS3LeviiaStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3LeviiaStorageParamsWithContext creates a new CreateS3LeviiaStorageParams object +// with the ability to set a context for a request. +func NewCreateS3LeviiaStorageParamsWithContext(ctx context.Context) *CreateS3LeviiaStorageParams { + return &CreateS3LeviiaStorageParams{ + Context: ctx, + } +} + +// NewCreateS3LeviiaStorageParamsWithHTTPClient creates a new CreateS3LeviiaStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3LeviiaStorageParamsWithHTTPClient(client *http.Client) *CreateS3LeviiaStorageParams { + return &CreateS3LeviiaStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3LeviiaStorageParams contains all the parameters to send to the API endpoint + + for the create s3 leviia storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3LeviiaStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3LeviiaStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 leviia storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3LeviiaStorageParams) WithDefaults() *CreateS3LeviiaStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 leviia storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3LeviiaStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 leviia storage params +func (o *CreateS3LeviiaStorageParams) WithTimeout(timeout time.Duration) *CreateS3LeviiaStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 leviia storage params +func (o *CreateS3LeviiaStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 leviia storage params +func (o *CreateS3LeviiaStorageParams) WithContext(ctx context.Context) *CreateS3LeviiaStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 leviia storage params +func (o *CreateS3LeviiaStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 leviia storage params +func (o *CreateS3LeviiaStorageParams) WithHTTPClient(client *http.Client) *CreateS3LeviiaStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 leviia storage params +func (o *CreateS3LeviiaStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 leviia storage params +func (o *CreateS3LeviiaStorageParams) WithRequest(request *models.StorageCreateS3LeviiaStorageRequest) *CreateS3LeviiaStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 leviia storage params +func (o *CreateS3LeviiaStorageParams) SetRequest(request *models.StorageCreateS3LeviiaStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3LeviiaStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_leviia_storage_responses.go b/client/swagger/http/storage/create_s3_leviia_storage_responses.go new file mode 100644 index 00000000..8be12ed0 --- /dev/null +++ b/client/swagger/http/storage/create_s3_leviia_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3LeviiaStorageReader is a Reader for the CreateS3LeviiaStorage structure. +type CreateS3LeviiaStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3LeviiaStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3LeviiaStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3LeviiaStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3LeviiaStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/leviia] CreateS3LeviiaStorage", response, response.Code()) + } +} + +// NewCreateS3LeviiaStorageOK creates a CreateS3LeviiaStorageOK with default headers values +func NewCreateS3LeviiaStorageOK() *CreateS3LeviiaStorageOK { + return &CreateS3LeviiaStorageOK{} +} + +/* +CreateS3LeviiaStorageOK describes a response with status code 200, with default header values. + +OK +*/ +type CreateS3LeviiaStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 leviia storage o k response has a 2xx status code +func (o *CreateS3LeviiaStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 leviia storage o k response has a 3xx status code +func (o *CreateS3LeviiaStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 leviia storage o k response has a 4xx status code +func (o *CreateS3LeviiaStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 leviia storage o k response has a 5xx status code +func (o *CreateS3LeviiaStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 leviia storage o k response a status code equal to that given +func (o *CreateS3LeviiaStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 leviia storage o k response +func (o *CreateS3LeviiaStorageOK) Code() int { + return 200 +} + +func (o *CreateS3LeviiaStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/leviia][%d] createS3LeviiaStorageOK %s", 200, payload) +} + +func (o *CreateS3LeviiaStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/leviia][%d] createS3LeviiaStorageOK %s", 200, payload) +} + +func (o *CreateS3LeviiaStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3LeviiaStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3LeviiaStorageBadRequest creates a CreateS3LeviiaStorageBadRequest with default headers values +func NewCreateS3LeviiaStorageBadRequest() *CreateS3LeviiaStorageBadRequest { + return &CreateS3LeviiaStorageBadRequest{} +} + +/* +CreateS3LeviiaStorageBadRequest describes a response with status code 400, with default header values. 
+ +Bad Request +*/ +type CreateS3LeviiaStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 leviia storage bad request response has a 2xx status code +func (o *CreateS3LeviiaStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 leviia storage bad request response has a 3xx status code +func (o *CreateS3LeviiaStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 leviia storage bad request response has a 4xx status code +func (o *CreateS3LeviiaStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 leviia storage bad request response has a 5xx status code +func (o *CreateS3LeviiaStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 leviia storage bad request response a status code equal to that given +func (o *CreateS3LeviiaStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 leviia storage bad request response +func (o *CreateS3LeviiaStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3LeviiaStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/leviia][%d] createS3LeviiaStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3LeviiaStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/leviia][%d] createS3LeviiaStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3LeviiaStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3LeviiaStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3LeviiaStorageInternalServerError creates a CreateS3LeviiaStorageInternalServerError with default headers values +func NewCreateS3LeviiaStorageInternalServerError() *CreateS3LeviiaStorageInternalServerError { + return &CreateS3LeviiaStorageInternalServerError{} +} + +/* +CreateS3LeviiaStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3LeviiaStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 leviia storage internal server error response has a 2xx status code +func (o *CreateS3LeviiaStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 leviia storage internal server error response has a 3xx status code +func (o *CreateS3LeviiaStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 leviia storage internal server error response has a 4xx status code +func (o *CreateS3LeviiaStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 leviia storage internal server error response has a 5xx status code +func (o *CreateS3LeviiaStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 leviia storage internal server error response a status code equal to that given +func (o *CreateS3LeviiaStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 leviia storage internal server error response +func (o *CreateS3LeviiaStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3LeviiaStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/leviia][%d] createS3LeviiaStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3LeviiaStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/leviia][%d] createS3LeviiaStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3LeviiaStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3LeviiaStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_liara_storage_responses.go b/client/swagger/http/storage/create_s3_liara_storage_responses.go index b247eecd..c566d4ff 100644 --- a/client/swagger/http/storage/create_s3_liara_storage_responses.go +++ b/client/swagger/http/storage/create_s3_liara_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3LiaraStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3LiaraStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3LiaraStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3LiaraStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3LiaraStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3LiaraStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3LiaraStorageInternalServerError) readResponse(response runtime. o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_linode_storage_parameters.go b/client/swagger/http/storage/create_s3_linode_storage_parameters.go new file mode 100644 index 00000000..b30e6ccf --- /dev/null +++ b/client/swagger/http/storage/create_s3_linode_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3LinodeStorageParams creates a new CreateS3LinodeStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3LinodeStorageParams() *CreateS3LinodeStorageParams { + return &CreateS3LinodeStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3LinodeStorageParamsWithTimeout creates a new CreateS3LinodeStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3LinodeStorageParamsWithTimeout(timeout time.Duration) *CreateS3LinodeStorageParams { + return &CreateS3LinodeStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3LinodeStorageParamsWithContext creates a new CreateS3LinodeStorageParams object +// with the ability to set a context for a request. +func NewCreateS3LinodeStorageParamsWithContext(ctx context.Context) *CreateS3LinodeStorageParams { + return &CreateS3LinodeStorageParams{ + Context: ctx, + } +} + +// NewCreateS3LinodeStorageParamsWithHTTPClient creates a new CreateS3LinodeStorageParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewCreateS3LinodeStorageParamsWithHTTPClient(client *http.Client) *CreateS3LinodeStorageParams { + return &CreateS3LinodeStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3LinodeStorageParams contains all the parameters to send to the API endpoint + + for the create s3 linode storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3LinodeStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3LinodeStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 linode storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3LinodeStorageParams) WithDefaults() *CreateS3LinodeStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 linode storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3LinodeStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 linode storage params +func (o *CreateS3LinodeStorageParams) WithTimeout(timeout time.Duration) *CreateS3LinodeStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 linode storage params +func (o *CreateS3LinodeStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 linode storage params +func (o *CreateS3LinodeStorageParams) WithContext(ctx context.Context) *CreateS3LinodeStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 linode storage params +func (o *CreateS3LinodeStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 linode storage params +func (o *CreateS3LinodeStorageParams) WithHTTPClient(client *http.Client) *CreateS3LinodeStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 linode storage params +func (o *CreateS3LinodeStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 linode storage params +func (o *CreateS3LinodeStorageParams) WithRequest(request *models.StorageCreateS3LinodeStorageRequest) *CreateS3LinodeStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 linode storage params +func (o *CreateS3LinodeStorageParams) SetRequest(request *models.StorageCreateS3LinodeStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3LinodeStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/client/swagger/http/storage/create_s3_linode_storage_responses.go b/client/swagger/http/storage/create_s3_linode_storage_responses.go new file mode 100644 index 00000000..c8f1f7cc --- /dev/null +++ b/client/swagger/http/storage/create_s3_linode_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3LinodeStorageReader is a Reader for the CreateS3LinodeStorage structure. +type CreateS3LinodeStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CreateS3LinodeStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3LinodeStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3LinodeStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3LinodeStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/linode] CreateS3LinodeStorage", response, response.Code()) + } +} + +// NewCreateS3LinodeStorageOK creates a CreateS3LinodeStorageOK with default headers values +func NewCreateS3LinodeStorageOK() *CreateS3LinodeStorageOK { + return &CreateS3LinodeStorageOK{} +} + +/* +CreateS3LinodeStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3LinodeStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 linode storage o k response has a 2xx status code +func (o *CreateS3LinodeStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 linode storage o k response has a 3xx status code +func (o *CreateS3LinodeStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 linode storage o k response has a 4xx status code +func (o *CreateS3LinodeStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 linode storage o k response has a 5xx status code +func (o *CreateS3LinodeStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 linode storage o k response a status code equal to that given +func (o *CreateS3LinodeStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 linode storage o k response +func (o *CreateS3LinodeStorageOK) Code() int { + return 200 +} + +func (o *CreateS3LinodeStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/linode][%d] createS3LinodeStorageOK %s", 200, payload) +} + +func (o *CreateS3LinodeStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/linode][%d] createS3LinodeStorageOK %s", 200, payload) +} + +func (o *CreateS3LinodeStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3LinodeStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3LinodeStorageBadRequest creates a CreateS3LinodeStorageBadRequest with default headers values +func NewCreateS3LinodeStorageBadRequest() *CreateS3LinodeStorageBadRequest { + return &CreateS3LinodeStorageBadRequest{} +} + +/* +CreateS3LinodeStorageBadRequest describes a response with status code 400, with default header values. 
+ +Bad Request +*/ +type CreateS3LinodeStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 linode storage bad request response has a 2xx status code +func (o *CreateS3LinodeStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 linode storage bad request response has a 3xx status code +func (o *CreateS3LinodeStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 linode storage bad request response has a 4xx status code +func (o *CreateS3LinodeStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 linode storage bad request response has a 5xx status code +func (o *CreateS3LinodeStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 linode storage bad request response a status code equal to that given +func (o *CreateS3LinodeStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 linode storage bad request response +func (o *CreateS3LinodeStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3LinodeStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/linode][%d] createS3LinodeStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3LinodeStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/linode][%d] createS3LinodeStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3LinodeStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3LinodeStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3LinodeStorageInternalServerError creates a CreateS3LinodeStorageInternalServerError with default headers values +func NewCreateS3LinodeStorageInternalServerError() *CreateS3LinodeStorageInternalServerError { + return &CreateS3LinodeStorageInternalServerError{} +} + +/* +CreateS3LinodeStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3LinodeStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 linode storage internal server error response has a 2xx status code +func (o *CreateS3LinodeStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 linode storage internal server error response has a 3xx status code +func (o *CreateS3LinodeStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 linode storage internal server error response has a 4xx status code +func (o *CreateS3LinodeStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 linode storage internal server error response has a 5xx status code +func (o *CreateS3LinodeStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 linode storage internal server error response a status code equal to that given +func (o *CreateS3LinodeStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 linode storage internal server error response +func (o *CreateS3LinodeStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3LinodeStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/linode][%d] createS3LinodeStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3LinodeStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/linode][%d] createS3LinodeStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3LinodeStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3LinodeStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_lyve_cloud_storage_responses.go b/client/swagger/http/storage/create_s3_lyve_cloud_storage_responses.go index 7ea669a6..03a65291 100644 --- a/client/swagger/http/storage/create_s3_lyve_cloud_storage_responses.go +++ b/client/swagger/http/storage/create_s3_lyve_cloud_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3LyveCloudStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3LyveCloudStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3LyveCloudStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3LyveCloudStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3LyveCloudStorageOK) readResponse(response runtime.ClientRespons o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3LyveCloudStorageBadRequest) readResponse(response runtime.Clien o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3LyveCloudStorageInternalServerError) readResponse(response runt o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_magalu_storage_parameters.go b/client/swagger/http/storage/create_s3_magalu_storage_parameters.go new file mode 100644 index 00000000..6f84d6bf --- /dev/null +++ b/client/swagger/http/storage/create_s3_magalu_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3MagaluStorageParams creates a new CreateS3MagaluStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3MagaluStorageParams() *CreateS3MagaluStorageParams { + return &CreateS3MagaluStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3MagaluStorageParamsWithTimeout creates a new CreateS3MagaluStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3MagaluStorageParamsWithTimeout(timeout time.Duration) *CreateS3MagaluStorageParams { + return &CreateS3MagaluStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3MagaluStorageParamsWithContext creates a new CreateS3MagaluStorageParams object +// with the ability to set a context for a request. +func NewCreateS3MagaluStorageParamsWithContext(ctx context.Context) *CreateS3MagaluStorageParams { + return &CreateS3MagaluStorageParams{ + Context: ctx, + } +} + +// NewCreateS3MagaluStorageParamsWithHTTPClient creates a new CreateS3MagaluStorageParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewCreateS3MagaluStorageParamsWithHTTPClient(client *http.Client) *CreateS3MagaluStorageParams { + return &CreateS3MagaluStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3MagaluStorageParams contains all the parameters to send to the API endpoint + + for the create s3 magalu storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3MagaluStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3MagaluStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 magalu storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3MagaluStorageParams) WithDefaults() *CreateS3MagaluStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 magalu storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3MagaluStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 magalu storage params +func (o *CreateS3MagaluStorageParams) WithTimeout(timeout time.Duration) *CreateS3MagaluStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 magalu storage params +func (o *CreateS3MagaluStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 magalu storage params +func (o *CreateS3MagaluStorageParams) WithContext(ctx context.Context) *CreateS3MagaluStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 magalu storage params +func (o *CreateS3MagaluStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 magalu storage params +func (o *CreateS3MagaluStorageParams) WithHTTPClient(client *http.Client) *CreateS3MagaluStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 magalu storage params +func (o *CreateS3MagaluStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 magalu storage params +func (o *CreateS3MagaluStorageParams) WithRequest(request *models.StorageCreateS3MagaluStorageRequest) *CreateS3MagaluStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 magalu storage params +func (o *CreateS3MagaluStorageParams) SetRequest(request *models.StorageCreateS3MagaluStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3MagaluStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/client/swagger/http/storage/create_s3_magalu_storage_responses.go b/client/swagger/http/storage/create_s3_magalu_storage_responses.go new file mode 100644 index 00000000..d345314d --- /dev/null +++ b/client/swagger/http/storage/create_s3_magalu_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3MagaluStorageReader is a Reader for the CreateS3MagaluStorage structure. +type CreateS3MagaluStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CreateS3MagaluStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3MagaluStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3MagaluStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3MagaluStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/magalu] CreateS3MagaluStorage", response, response.Code()) + } +} + +// NewCreateS3MagaluStorageOK creates a CreateS3MagaluStorageOK with default headers values +func NewCreateS3MagaluStorageOK() *CreateS3MagaluStorageOK { + return &CreateS3MagaluStorageOK{} +} + +/* +CreateS3MagaluStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3MagaluStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 magalu storage o k response has a 2xx status code +func (o *CreateS3MagaluStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 magalu storage o k response has a 3xx status code +func (o *CreateS3MagaluStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 magalu storage o k response has a 4xx status code +func (o *CreateS3MagaluStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 magalu storage o k response has a 5xx status code +func (o *CreateS3MagaluStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 magalu storage o k response a status code equal to that given +func (o *CreateS3MagaluStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 magalu storage o k response +func (o *CreateS3MagaluStorageOK) Code() int { + return 200 +} + +func (o *CreateS3MagaluStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/magalu][%d] createS3MagaluStorageOK %s", 200, payload) +} + +func (o *CreateS3MagaluStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/magalu][%d] createS3MagaluStorageOK %s", 200, payload) +} + +func (o *CreateS3MagaluStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3MagaluStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3MagaluStorageBadRequest creates a CreateS3MagaluStorageBadRequest with default headers values +func NewCreateS3MagaluStorageBadRequest() *CreateS3MagaluStorageBadRequest { + return &CreateS3MagaluStorageBadRequest{} +} + +/* +CreateS3MagaluStorageBadRequest describes a response with status code 400, with default header values. 
+ +Bad Request +*/ +type CreateS3MagaluStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 magalu storage bad request response has a 2xx status code +func (o *CreateS3MagaluStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 magalu storage bad request response has a 3xx status code +func (o *CreateS3MagaluStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 magalu storage bad request response has a 4xx status code +func (o *CreateS3MagaluStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 magalu storage bad request response has a 5xx status code +func (o *CreateS3MagaluStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 magalu storage bad request response a status code equal to that given +func (o *CreateS3MagaluStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 magalu storage bad request response +func (o *CreateS3MagaluStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3MagaluStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/magalu][%d] createS3MagaluStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3MagaluStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/magalu][%d] createS3MagaluStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3MagaluStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3MagaluStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3MagaluStorageInternalServerError creates a CreateS3MagaluStorageInternalServerError with default headers values +func NewCreateS3MagaluStorageInternalServerError() *CreateS3MagaluStorageInternalServerError { + return &CreateS3MagaluStorageInternalServerError{} +} + +/* +CreateS3MagaluStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3MagaluStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 magalu storage internal server error response has a 2xx status code +func (o *CreateS3MagaluStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 magalu storage internal server error response has a 3xx status code +func (o *CreateS3MagaluStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 magalu storage internal server error response has a 4xx status code +func (o *CreateS3MagaluStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 magalu storage internal server error response has a 5xx status code +func (o *CreateS3MagaluStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 magalu storage internal server error response a status code equal to that given +func (o *CreateS3MagaluStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 magalu storage internal server error response +func (o *CreateS3MagaluStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3MagaluStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/magalu][%d] createS3MagaluStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3MagaluStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/magalu][%d] createS3MagaluStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3MagaluStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3MagaluStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_minio_storage_responses.go b/client/swagger/http/storage/create_s3_minio_storage_responses.go index 66bf2801..d52dcb94 100644 --- a/client/swagger/http/storage/create_s3_minio_storage_responses.go +++ b/client/swagger/http/storage/create_s3_minio_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3MinioStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3MinioStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3MinioStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3MinioStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3MinioStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3MinioStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3MinioStorageInternalServerError) readResponse(response runtime. o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_netease_storage_responses.go b/client/swagger/http/storage/create_s3_netease_storage_responses.go index dc865b72..3cf4f933 100644 --- a/client/swagger/http/storage/create_s3_netease_storage_responses.go +++ b/client/swagger/http/storage/create_s3_netease_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3NeteaseStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3NeteaseStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3NeteaseStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3NeteaseStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3NeteaseStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3NeteaseStorageBadRequest) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3NeteaseStorageInternalServerError) readResponse(response runtim o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_other_storage_responses.go b/client/swagger/http/storage/create_s3_other_storage_responses.go index 1240c3a2..2681b5e7 100644 --- a/client/swagger/http/storage/create_s3_other_storage_responses.go +++ b/client/swagger/http/storage/create_s3_other_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3OtherStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateS3OtherStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3OtherStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3OtherStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3OtherStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3OtherStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3OtherStorageInternalServerError) readResponse(response runtime. 
o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_petabox_storage_parameters.go b/client/swagger/http/storage/create_s3_petabox_storage_parameters.go new file mode 100644 index 00000000..b7ca2203 --- /dev/null +++ b/client/swagger/http/storage/create_s3_petabox_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3PetaboxStorageParams creates a new CreateS3PetaboxStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3PetaboxStorageParams() *CreateS3PetaboxStorageParams { + return &CreateS3PetaboxStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3PetaboxStorageParamsWithTimeout creates a new CreateS3PetaboxStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3PetaboxStorageParamsWithTimeout(timeout time.Duration) *CreateS3PetaboxStorageParams { + return &CreateS3PetaboxStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3PetaboxStorageParamsWithContext creates a new CreateS3PetaboxStorageParams object +// with the ability to set a context for a request. +func NewCreateS3PetaboxStorageParamsWithContext(ctx context.Context) *CreateS3PetaboxStorageParams { + return &CreateS3PetaboxStorageParams{ + Context: ctx, + } +} + +// NewCreateS3PetaboxStorageParamsWithHTTPClient creates a new CreateS3PetaboxStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3PetaboxStorageParamsWithHTTPClient(client *http.Client) *CreateS3PetaboxStorageParams { + return &CreateS3PetaboxStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3PetaboxStorageParams contains all the parameters to send to the API endpoint + + for the create s3 petabox storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3PetaboxStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3PetaboxStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 petabox storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3PetaboxStorageParams) WithDefaults() *CreateS3PetaboxStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 petabox storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3PetaboxStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 petabox storage params +func (o *CreateS3PetaboxStorageParams) WithTimeout(timeout time.Duration) *CreateS3PetaboxStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 petabox storage params +func (o *CreateS3PetaboxStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 petabox storage params +func (o *CreateS3PetaboxStorageParams) WithContext(ctx context.Context) *CreateS3PetaboxStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 petabox storage params +func (o *CreateS3PetaboxStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 petabox storage params +func (o *CreateS3PetaboxStorageParams) WithHTTPClient(client *http.Client) *CreateS3PetaboxStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 petabox storage params +func (o *CreateS3PetaboxStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 petabox storage params +func (o *CreateS3PetaboxStorageParams) WithRequest(request *models.StorageCreateS3PetaboxStorageRequest) *CreateS3PetaboxStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 petabox storage params +func (o *CreateS3PetaboxStorageParams) SetRequest(request *models.StorageCreateS3PetaboxStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3PetaboxStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_petabox_storage_responses.go b/client/swagger/http/storage/create_s3_petabox_storage_responses.go new file mode 100644 index 00000000..36c0a731 --- /dev/null +++ b/client/swagger/http/storage/create_s3_petabox_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3PetaboxStorageReader is a Reader for the CreateS3PetaboxStorage structure. +type CreateS3PetaboxStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3PetaboxStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3PetaboxStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3PetaboxStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3PetaboxStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/petabox] CreateS3PetaboxStorage", response, response.Code()) + } +} + +// NewCreateS3PetaboxStorageOK creates a CreateS3PetaboxStorageOK with default headers values +func NewCreateS3PetaboxStorageOK() *CreateS3PetaboxStorageOK { + return &CreateS3PetaboxStorageOK{} +} + +/* +CreateS3PetaboxStorageOK describes a response with status code 200, with default header values. + +OK +*/ +type CreateS3PetaboxStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 petabox storage o k response has a 2xx status code +func (o *CreateS3PetaboxStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 petabox storage o k response has a 3xx status code +func (o *CreateS3PetaboxStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 petabox storage o k response has a 4xx status code +func (o *CreateS3PetaboxStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 petabox storage o k response has a 5xx status code +func (o *CreateS3PetaboxStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 petabox storage o k response a status code equal to that given +func (o *CreateS3PetaboxStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 petabox storage o k response +func (o *CreateS3PetaboxStorageOK) Code() int { + return 200 +} + +func (o *CreateS3PetaboxStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/petabox][%d] createS3PetaboxStorageOK %s", 200, payload) +} + +func (o *CreateS3PetaboxStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/petabox][%d] createS3PetaboxStorageOK %s", 200, payload) +} + +func (o *CreateS3PetaboxStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3PetaboxStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3PetaboxStorageBadRequest creates a CreateS3PetaboxStorageBadRequest with default headers values +func NewCreateS3PetaboxStorageBadRequest() *CreateS3PetaboxStorageBadRequest { + return &CreateS3PetaboxStorageBadRequest{} +} + +/* +CreateS3PetaboxStorageBadRequest describes a response with status code 400, with default header values. 
+ +Bad Request +*/ +type CreateS3PetaboxStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 petabox storage bad request response has a 2xx status code +func (o *CreateS3PetaboxStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 petabox storage bad request response has a 3xx status code +func (o *CreateS3PetaboxStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 petabox storage bad request response has a 4xx status code +func (o *CreateS3PetaboxStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 petabox storage bad request response has a 5xx status code +func (o *CreateS3PetaboxStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 petabox storage bad request response a status code equal to that given +func (o *CreateS3PetaboxStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 petabox storage bad request response +func (o *CreateS3PetaboxStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3PetaboxStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/petabox][%d] createS3PetaboxStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3PetaboxStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/petabox][%d] createS3PetaboxStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3PetaboxStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3PetaboxStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3PetaboxStorageInternalServerError creates a CreateS3PetaboxStorageInternalServerError with default headers values +func NewCreateS3PetaboxStorageInternalServerError() *CreateS3PetaboxStorageInternalServerError { + return &CreateS3PetaboxStorageInternalServerError{} +} + +/* +CreateS3PetaboxStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3PetaboxStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 petabox storage internal server error response has a 2xx status code +func (o *CreateS3PetaboxStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 petabox storage internal server error response has a 3xx status code +func (o *CreateS3PetaboxStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 petabox storage internal server error response has a 4xx status code +func (o *CreateS3PetaboxStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 petabox storage internal server error response has a 5xx status code +func (o *CreateS3PetaboxStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 petabox storage internal server error response a status code equal to that given +func (o *CreateS3PetaboxStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 petabox storage internal server error response +func (o *CreateS3PetaboxStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3PetaboxStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/petabox][%d] createS3PetaboxStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3PetaboxStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/petabox][%d] createS3PetaboxStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3PetaboxStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3PetaboxStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_qiniu_storage_responses.go b/client/swagger/http/storage/create_s3_qiniu_storage_responses.go index 6e799c30..d1e4fd80 100644 --- a/client/swagger/http/storage/create_s3_qiniu_storage_responses.go +++ b/client/swagger/http/storage/create_s3_qiniu_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3QiniuStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3QiniuStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3QiniuStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3QiniuStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3QiniuStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3QiniuStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3QiniuStorageInternalServerError) readResponse(response runtime. o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_rack_corp_storage_responses.go b/client/swagger/http/storage/create_s3_rack_corp_storage_responses.go index 03f570f5..0ba865e4 100644 --- a/client/swagger/http/storage/create_s3_rack_corp_storage_responses.go +++ b/client/swagger/http/storage/create_s3_rack_corp_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3RackCorpStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3RackCorpStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3RackCorpStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3RackCorpStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3RackCorpStorageOK) readResponse(response runtime.ClientResponse o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3RackCorpStorageBadRequest) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3RackCorpStorageInternalServerError) readResponse(response runti o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_rclone_storage_parameters.go b/client/swagger/http/storage/create_s3_rclone_storage_parameters.go new file mode 100644 index 00000000..2354c30a --- /dev/null +++ b/client/swagger/http/storage/create_s3_rclone_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3RcloneStorageParams creates a new CreateS3RcloneStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3RcloneStorageParams() *CreateS3RcloneStorageParams { + return &CreateS3RcloneStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3RcloneStorageParamsWithTimeout creates a new CreateS3RcloneStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3RcloneStorageParamsWithTimeout(timeout time.Duration) *CreateS3RcloneStorageParams { + return &CreateS3RcloneStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3RcloneStorageParamsWithContext creates a new CreateS3RcloneStorageParams object +// with the ability to set a context for a request. +func NewCreateS3RcloneStorageParamsWithContext(ctx context.Context) *CreateS3RcloneStorageParams { + return &CreateS3RcloneStorageParams{ + Context: ctx, + } +} + +// NewCreateS3RcloneStorageParamsWithHTTPClient creates a new CreateS3RcloneStorageParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewCreateS3RcloneStorageParamsWithHTTPClient(client *http.Client) *CreateS3RcloneStorageParams { + return &CreateS3RcloneStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3RcloneStorageParams contains all the parameters to send to the API endpoint + + for the create s3 rclone storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3RcloneStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3RcloneStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 rclone storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3RcloneStorageParams) WithDefaults() *CreateS3RcloneStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 rclone storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3RcloneStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 rclone storage params +func (o *CreateS3RcloneStorageParams) WithTimeout(timeout time.Duration) *CreateS3RcloneStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 rclone storage params +func (o *CreateS3RcloneStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 rclone storage params +func (o *CreateS3RcloneStorageParams) WithContext(ctx context.Context) *CreateS3RcloneStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 rclone storage params +func (o *CreateS3RcloneStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 rclone storage params +func (o *CreateS3RcloneStorageParams) WithHTTPClient(client *http.Client) *CreateS3RcloneStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 rclone storage params +func (o *CreateS3RcloneStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 rclone storage params +func (o *CreateS3RcloneStorageParams) WithRequest(request *models.StorageCreateS3RcloneStorageRequest) *CreateS3RcloneStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 rclone storage params +func (o *CreateS3RcloneStorageParams) SetRequest(request *models.StorageCreateS3RcloneStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3RcloneStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/client/swagger/http/storage/create_s3_rclone_storage_responses.go b/client/swagger/http/storage/create_s3_rclone_storage_responses.go new file mode 100644 index 00000000..17f47278 --- /dev/null +++ b/client/swagger/http/storage/create_s3_rclone_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3RcloneStorageReader is a Reader for the CreateS3RcloneStorage structure. +type CreateS3RcloneStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CreateS3RcloneStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3RcloneStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3RcloneStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3RcloneStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/rclone] CreateS3RcloneStorage", response, response.Code()) + } +} + +// NewCreateS3RcloneStorageOK creates a CreateS3RcloneStorageOK with default headers values +func NewCreateS3RcloneStorageOK() *CreateS3RcloneStorageOK { + return &CreateS3RcloneStorageOK{} +} + +/* +CreateS3RcloneStorageOK describes a response with status code 200, with default header values. 
+ +OK +*/ +type CreateS3RcloneStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 rclone storage o k response has a 2xx status code +func (o *CreateS3RcloneStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 rclone storage o k response has a 3xx status code +func (o *CreateS3RcloneStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 rclone storage o k response has a 4xx status code +func (o *CreateS3RcloneStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 rclone storage o k response has a 5xx status code +func (o *CreateS3RcloneStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 rclone storage o k response a status code equal to that given +func (o *CreateS3RcloneStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 rclone storage o k response +func (o *CreateS3RcloneStorageOK) Code() int { + return 200 +} + +func (o *CreateS3RcloneStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rclone][%d] createS3RcloneStorageOK %s", 200, payload) +} + +func (o *CreateS3RcloneStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rclone][%d] createS3RcloneStorageOK %s", 200, payload) +} + +func (o *CreateS3RcloneStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3RcloneStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3RcloneStorageBadRequest creates a CreateS3RcloneStorageBadRequest with default headers values +func NewCreateS3RcloneStorageBadRequest() *CreateS3RcloneStorageBadRequest { + return &CreateS3RcloneStorageBadRequest{} +} + +/* +CreateS3RcloneStorageBadRequest describes a response with status code 400, with default header values. 
+ +Bad Request +*/ +type CreateS3RcloneStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 rclone storage bad request response has a 2xx status code +func (o *CreateS3RcloneStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 rclone storage bad request response has a 3xx status code +func (o *CreateS3RcloneStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 rclone storage bad request response has a 4xx status code +func (o *CreateS3RcloneStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 rclone storage bad request response has a 5xx status code +func (o *CreateS3RcloneStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 rclone storage bad request response a status code equal to that given +func (o *CreateS3RcloneStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 rclone storage bad request response +func (o *CreateS3RcloneStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3RcloneStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rclone][%d] createS3RcloneStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3RcloneStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rclone][%d] createS3RcloneStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3RcloneStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3RcloneStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3RcloneStorageInternalServerError creates a CreateS3RcloneStorageInternalServerError with default headers values +func NewCreateS3RcloneStorageInternalServerError() *CreateS3RcloneStorageInternalServerError { + return &CreateS3RcloneStorageInternalServerError{} +} + +/* +CreateS3RcloneStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3RcloneStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 rclone storage internal server error response has a 2xx status code +func (o *CreateS3RcloneStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 rclone storage internal server error response has a 3xx status code +func (o *CreateS3RcloneStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 rclone storage internal server error response has a 4xx status code +func (o *CreateS3RcloneStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 rclone storage internal server error response has a 5xx status code +func (o *CreateS3RcloneStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 rclone storage internal server error response a status code equal to that given +func (o *CreateS3RcloneStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 rclone storage internal server error response +func (o *CreateS3RcloneStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3RcloneStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rclone][%d] createS3RcloneStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3RcloneStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/rclone][%d] createS3RcloneStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3RcloneStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3RcloneStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_scaleway_storage_responses.go b/client/swagger/http/storage/create_s3_scaleway_storage_responses.go index 208e5d94..13bc37b2 100644 --- a/client/swagger/http/storage/create_s3_scaleway_storage_responses.go +++ b/client/swagger/http/storage/create_s3_scaleway_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3ScalewayStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3ScalewayStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3ScalewayStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3ScalewayStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3ScalewayStorageOK) readResponse(response runtime.ClientResponse o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3ScalewayStorageBadRequest) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3ScalewayStorageInternalServerError) readResponse(response runti o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_seaweed_f_s_storage_responses.go b/client/swagger/http/storage/create_s3_seaweed_f_s_storage_responses.go index fecc0392..ef2d2407 100644 --- a/client/swagger/http/storage/create_s3_seaweed_f_s_storage_responses.go +++ b/client/swagger/http/storage/create_s3_seaweed_f_s_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3SeaweedFSStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3SeaweedFSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3SeaweedFSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3SeaweedFSStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3SeaweedFSStorageOK) readResponse(response runtime.ClientRespons o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3SeaweedFSStorageBadRequest) readResponse(response runtime.Clien o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3SeaweedFSStorageInternalServerError) readResponse(response runt o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_stack_path_storage_responses.go b/client/swagger/http/storage/create_s3_stack_path_storage_responses.go index ebef892b..1bea30a7 100644 --- a/client/swagger/http/storage/create_s3_stack_path_storage_responses.go +++ b/client/swagger/http/storage/create_s3_stack_path_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3StackPathStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3StackPathStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3StackPathStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3StackPathStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3StackPathStorageOK) readResponse(response runtime.ClientRespons o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3StackPathStorageBadRequest) readResponse(response runtime.Clien o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3StackPathStorageInternalServerError) readResponse(response runt o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_storj_storage_responses.go b/client/swagger/http/storage/create_s3_storj_storage_responses.go index dc880013..8188616f 100644 --- a/client/swagger/http/storage/create_s3_storj_storage_responses.go +++ b/client/swagger/http/storage/create_s3_storj_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3StorjStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateS3StorjStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3StorjStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3StorjStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3StorjStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3StorjStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3StorjStorageInternalServerError) readResponse(response runtime. 
o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_synology_storage_parameters.go b/client/swagger/http/storage/create_s3_synology_storage_parameters.go new file mode 100644 index 00000000..1ecc3f0b --- /dev/null +++ b/client/swagger/http/storage/create_s3_synology_storage_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// NewCreateS3SynologyStorageParams creates a new CreateS3SynologyStorageParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateS3SynologyStorageParams() *CreateS3SynologyStorageParams { + return &CreateS3SynologyStorageParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateS3SynologyStorageParamsWithTimeout creates a new CreateS3SynologyStorageParams object +// with the ability to set a timeout on a request. +func NewCreateS3SynologyStorageParamsWithTimeout(timeout time.Duration) *CreateS3SynologyStorageParams { + return &CreateS3SynologyStorageParams{ + timeout: timeout, + } +} + +// NewCreateS3SynologyStorageParamsWithContext creates a new CreateS3SynologyStorageParams object +// with the ability to set a context for a request. +func NewCreateS3SynologyStorageParamsWithContext(ctx context.Context) *CreateS3SynologyStorageParams { + return &CreateS3SynologyStorageParams{ + Context: ctx, + } +} + +// NewCreateS3SynologyStorageParamsWithHTTPClient creates a new CreateS3SynologyStorageParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateS3SynologyStorageParamsWithHTTPClient(client *http.Client) *CreateS3SynologyStorageParams { + return &CreateS3SynologyStorageParams{ + HTTPClient: client, + } +} + +/* +CreateS3SynologyStorageParams contains all the parameters to send to the API endpoint + + for the create s3 synology storage operation. + + Typically these are written to a http.Request. +*/ +type CreateS3SynologyStorageParams struct { + + /* Request. + + Request body + */ + Request *models.StorageCreateS3SynologyStorageRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create s3 synology storage params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateS3SynologyStorageParams) WithDefaults() *CreateS3SynologyStorageParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create s3 synology storage params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateS3SynologyStorageParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create s3 synology storage params +func (o *CreateS3SynologyStorageParams) WithTimeout(timeout time.Duration) *CreateS3SynologyStorageParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create s3 synology storage params +func (o *CreateS3SynologyStorageParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create s3 synology storage params +func (o *CreateS3SynologyStorageParams) WithContext(ctx context.Context) *CreateS3SynologyStorageParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create s3 synology storage params +func (o *CreateS3SynologyStorageParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create s3 synology storage params +func (o *CreateS3SynologyStorageParams) WithHTTPClient(client *http.Client) *CreateS3SynologyStorageParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create s3 synology storage params +func (o *CreateS3SynologyStorageParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRequest adds the request to the create s3 synology storage params +func (o *CreateS3SynologyStorageParams) WithRequest(request *models.StorageCreateS3SynologyStorageRequest) *CreateS3SynologyStorageParams { + o.SetRequest(request) + return o +} + +// SetRequest adds the request to the create s3 synology storage params +func (o *CreateS3SynologyStorageParams) SetRequest(request *models.StorageCreateS3SynologyStorageRequest) { + o.Request = request +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateS3SynologyStorageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Request != nil { + if err := r.SetBodyParam(o.Request); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/swagger/http/storage/create_s3_synology_storage_responses.go b/client/swagger/http/storage/create_s3_synology_storage_responses.go new file mode 100644 index 00000000..2f3882c5 --- /dev/null +++ b/client/swagger/http/storage/create_s3_synology_storage_responses.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/data-preservation-programs/singularity/client/swagger/models" +) + +// CreateS3SynologyStorageReader is a Reader for the CreateS3SynologyStorage structure. +type CreateS3SynologyStorageReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateS3SynologyStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewCreateS3SynologyStorageOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateS3SynologyStorageBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateS3SynologyStorageInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /storage/s3/synology] CreateS3SynologyStorage", response, response.Code()) + } +} + +// NewCreateS3SynologyStorageOK creates a CreateS3SynologyStorageOK with default headers values +func NewCreateS3SynologyStorageOK() *CreateS3SynologyStorageOK { + return &CreateS3SynologyStorageOK{} +} + +/* +CreateS3SynologyStorageOK describes a response with status code 200, with default header values. + +OK +*/ +type CreateS3SynologyStorageOK struct { + Payload *models.ModelStorage +} + +// IsSuccess returns true when this create s3 synology storage o k response has a 2xx status code +func (o *CreateS3SynologyStorageOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create s3 synology storage o k response has a 3xx status code +func (o *CreateS3SynologyStorageOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 synology storage o k response has a 4xx status code +func (o *CreateS3SynologyStorageOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 synology storage o k response has a 5xx status code +func (o *CreateS3SynologyStorageOK) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 synology storage o k response a status code equal to that given +func (o *CreateS3SynologyStorageOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the create s3 synology storage o k response +func (o *CreateS3SynologyStorageOK) Code() int { + return 200 +} + +func (o *CreateS3SynologyStorageOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/synology][%d] createS3SynologyStorageOK %s", 200, payload) +} + +func (o *CreateS3SynologyStorageOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/synology][%d] createS3SynologyStorageOK %s", 200, payload) +} + +func (o *CreateS3SynologyStorageOK) GetPayload() *models.ModelStorage { + return o.Payload +} + +func (o *CreateS3SynologyStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModelStorage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3SynologyStorageBadRequest creates a CreateS3SynologyStorageBadRequest with default headers values +func NewCreateS3SynologyStorageBadRequest() *CreateS3SynologyStorageBadRequest { + return &CreateS3SynologyStorageBadRequest{} +} + +/* +CreateS3SynologyStorageBadRequest describes a response with status code 400, with default header values. 
+ +Bad Request +*/ +type CreateS3SynologyStorageBadRequest struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 synology storage bad request response has a 2xx status code +func (o *CreateS3SynologyStorageBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 synology storage bad request response has a 3xx status code +func (o *CreateS3SynologyStorageBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 synology storage bad request response has a 4xx status code +func (o *CreateS3SynologyStorageBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create s3 synology storage bad request response has a 5xx status code +func (o *CreateS3SynologyStorageBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create s3 synology storage bad request response a status code equal to that given +func (o *CreateS3SynologyStorageBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create s3 synology storage bad request response +func (o *CreateS3SynologyStorageBadRequest) Code() int { + return 400 +} + +func (o *CreateS3SynologyStorageBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/synology][%d] createS3SynologyStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3SynologyStorageBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/synology][%d] createS3SynologyStorageBadRequest %s", 400, payload) +} + +func (o *CreateS3SynologyStorageBadRequest) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3SynologyStorageBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateS3SynologyStorageInternalServerError creates a CreateS3SynologyStorageInternalServerError with default headers values +func NewCreateS3SynologyStorageInternalServerError() *CreateS3SynologyStorageInternalServerError { + return &CreateS3SynologyStorageInternalServerError{} +} + +/* +CreateS3SynologyStorageInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type CreateS3SynologyStorageInternalServerError struct { + Payload *models.APIHTTPError +} + +// IsSuccess returns true when this create s3 synology storage internal server error response has a 2xx status code +func (o *CreateS3SynologyStorageInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create s3 synology storage internal server error response has a 3xx status code +func (o *CreateS3SynologyStorageInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create s3 synology storage internal server error response has a 4xx status code +func (o *CreateS3SynologyStorageInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create s3 synology storage internal server error response has a 5xx status code +func (o *CreateS3SynologyStorageInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create s3 synology storage internal server error response a status code equal to that given +func (o *CreateS3SynologyStorageInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create s3 synology storage internal server error response +func (o *CreateS3SynologyStorageInternalServerError) Code() int { + return 500 +} + +func (o *CreateS3SynologyStorageInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/synology][%d] createS3SynologyStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3SynologyStorageInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /storage/s3/synology][%d] createS3SynologyStorageInternalServerError %s", 500, payload) +} + +func (o *CreateS3SynologyStorageInternalServerError) GetPayload() *models.APIHTTPError { + return o.Payload +} + +func (o *CreateS3SynologyStorageInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.APIHTTPError) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/client/swagger/http/storage/create_s3_tencent_c_o_s_storage_responses.go b/client/swagger/http/storage/create_s3_tencent_c_o_s_storage_responses.go index 75c64fa3..0b3c758f 100644 --- a/client/swagger/http/storage/create_s3_tencent_c_o_s_storage_responses.go +++ b/client/swagger/http/storage/create_s3_tencent_c_o_s_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3TencentCOSStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3TencentCOSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3TencentCOSStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3TencentCOSStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3TencentCOSStorageOK) readResponse(response runtime.ClientRespon o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3TencentCOSStorageBadRequest) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3TencentCOSStorageInternalServerError) readResponse(response run o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_s3_wasabi_storage_responses.go b/client/swagger/http/storage/create_s3_wasabi_storage_responses.go index e0b2c67d..a3df49ca 100644 --- a/client/swagger/http/storage/create_s3_wasabi_storage_responses.go +++ b/client/swagger/http/storage/create_s3_wasabi_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateS3WasabiStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateS3WasabiStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateS3WasabiStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateS3WasabiStorageOK() @@ -110,7 +111,7 @@ func (o *CreateS3WasabiStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateS3WasabiStorageBadRequest) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateS3WasabiStorageInternalServerError) readResponse(response runtime o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_seafile_storage_responses.go b/client/swagger/http/storage/create_seafile_storage_responses.go index 168a191d..c2feae8c 100644 --- a/client/swagger/http/storage/create_seafile_storage_responses.go +++ b/client/swagger/http/storage/create_seafile_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateSeafileStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateSeafileStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateSeafileStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateSeafileStorageOK() @@ -110,7 +111,7 @@ func (o *CreateSeafileStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateSeafileStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateSeafileStorageInternalServerError) readResponse(response runtime. 
o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_sftp_storage_responses.go b/client/swagger/http/storage/create_sftp_storage_responses.go index cb4f4dbd..f1de3de4 100644 --- a/client/swagger/http/storage/create_sftp_storage_responses.go +++ b/client/swagger/http/storage/create_sftp_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateSftpStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateSftpStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateSftpStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateSftpStorageOK() @@ -110,7 +111,7 @@ func (o *CreateSftpStorageOK) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateSftpStorageBadRequest) readResponse(response runtime.ClientRespon o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateSftpStorageInternalServerError) readResponse(response runtime.Cli o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_sharefile_storage_responses.go b/client/swagger/http/storage/create_sharefile_storage_responses.go index fc61e7ca..388e083d 100644 --- a/client/swagger/http/storage/create_sharefile_storage_responses.go +++ b/client/swagger/http/storage/create_sharefile_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateSharefileStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateSharefileStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateSharefileStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateSharefileStorageOK() @@ -110,7 +111,7 @@ func (o *CreateSharefileStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateSharefileStorageBadRequest) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateSharefileStorageInternalServerError) readResponse(response runtim o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_sia_storage_responses.go b/client/swagger/http/storage/create_sia_storage_responses.go index 9c054476..9ea6a54a 100644 --- a/client/swagger/http/storage/create_sia_storage_responses.go +++ b/client/swagger/http/storage/create_sia_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateSiaStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateSiaStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateSiaStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateSiaStorageOK() @@ -110,7 +111,7 @@ func (o *CreateSiaStorageOK) readResponse(response runtime.ClientResponse, consu o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateSiaStorageBadRequest) readResponse(response runtime.ClientRespons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateSiaStorageInternalServerError) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_smb_storage_responses.go b/client/swagger/http/storage/create_smb_storage_responses.go index 0327355c..6bda8700 100644 --- a/client/swagger/http/storage/create_smb_storage_responses.go +++ b/client/swagger/http/storage/create_smb_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateSmbStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateSmbStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateSmbStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateSmbStorageOK() @@ -110,7 +111,7 @@ func (o *CreateSmbStorageOK) readResponse(response runtime.ClientResponse, consu o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateSmbStorageBadRequest) readResponse(response runtime.ClientRespons o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateSmbStorageInternalServerError) readResponse(response runtime.Clie o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_storj_existing_storage_responses.go b/client/swagger/http/storage/create_storj_existing_storage_responses.go index 1ba6110c..6d15f0cc 100644 --- a/client/swagger/http/storage/create_storj_existing_storage_responses.go +++ b/client/swagger/http/storage/create_storj_existing_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateStorjExistingStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateStorjExistingStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateStorjExistingStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateStorjExistingStorageOK() @@ -110,7 +111,7 @@ func (o *CreateStorjExistingStorageOK) readResponse(response runtime.ClientRespo o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateStorjExistingStorageBadRequest) readResponse(response runtime.Cli o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateStorjExistingStorageInternalServerError) readResponse(response ru o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_storj_new_storage_responses.go b/client/swagger/http/storage/create_storj_new_storage_responses.go index 2ab4f6d8..e562e090 100644 --- a/client/swagger/http/storage/create_storj_new_storage_responses.go +++ b/client/swagger/http/storage/create_storj_new_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateStorjNewStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateStorjNewStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateStorjNewStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateStorjNewStorageOK() @@ -110,7 +111,7 @@ func (o *CreateStorjNewStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateStorjNewStorageBadRequest) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateStorjNewStorageInternalServerError) readResponse(response runtime o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_sugarsync_storage_responses.go b/client/swagger/http/storage/create_sugarsync_storage_responses.go index 7d574e7b..cf3e86e6 100644 --- a/client/swagger/http/storage/create_sugarsync_storage_responses.go +++ b/client/swagger/http/storage/create_sugarsync_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateSugarsyncStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateSugarsyncStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateSugarsyncStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateSugarsyncStorageOK() @@ -110,7 +111,7 @@ func (o *CreateSugarsyncStorageOK) readResponse(response runtime.ClientResponse, o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateSugarsyncStorageBadRequest) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateSugarsyncStorageInternalServerError) readResponse(response runtim o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_swift_storage_responses.go b/client/swagger/http/storage/create_swift_storage_responses.go index 443e3d65..2fa1993a 100644 --- a/client/swagger/http/storage/create_swift_storage_responses.go +++ b/client/swagger/http/storage/create_swift_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateSwiftStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateSwiftStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateSwiftStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateSwiftStorageOK() @@ -110,7 +111,7 @@ func (o *CreateSwiftStorageOK) readResponse(response runtime.ClientResponse, con o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateSwiftStorageBadRequest) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateSwiftStorageInternalServerError) readResponse(response runtime.Cl o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_union_storage_responses.go b/client/swagger/http/storage/create_union_storage_responses.go index bb642f59..ca71878f 100644 --- a/client/swagger/http/storage/create_union_storage_responses.go +++ b/client/swagger/http/storage/create_union_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateUnionStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateUnionStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateUnionStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateUnionStorageOK() @@ -110,7 +111,7 @@ func (o *CreateUnionStorageOK) readResponse(response runtime.ClientResponse, con o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateUnionStorageBadRequest) readResponse(response runtime.ClientRespo o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateUnionStorageInternalServerError) readResponse(response runtime.Cl o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_uptobox_storage_responses.go b/client/swagger/http/storage/create_uptobox_storage_responses.go index 5194640c..3893469b 100644 --- a/client/swagger/http/storage/create_uptobox_storage_responses.go +++ b/client/swagger/http/storage/create_uptobox_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateUptoboxStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateUptoboxStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateUptoboxStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateUptoboxStorageOK() @@ -110,7 +111,7 @@ func (o *CreateUptoboxStorageOK) readResponse(response runtime.ClientResponse, c o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateUptoboxStorageBadRequest) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateUptoboxStorageInternalServerError) readResponse(response runtime. 
o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_webdav_storage_responses.go b/client/swagger/http/storage/create_webdav_storage_responses.go index 8fdbe611..23919411 100644 --- a/client/swagger/http/storage/create_webdav_storage_responses.go +++ b/client/swagger/http/storage/create_webdav_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateWebdavStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *CreateWebdavStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateWebdavStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateWebdavStorageOK() @@ -110,7 +111,7 @@ func (o *CreateWebdavStorageOK) readResponse(response runtime.ClientResponse, co o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateWebdavStorageBadRequest) readResponse(response runtime.ClientResp o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateWebdavStorageInternalServerError) readResponse(response runtime.C o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_yandex_storage_responses.go b/client/swagger/http/storage/create_yandex_storage_responses.go index 74a7bce7..5fc6b69a 100644 --- a/client/swagger/http/storage/create_yandex_storage_responses.go +++ b/client/swagger/http/storage/create_yandex_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateYandexStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateYandexStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateYandexStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateYandexStorageOK() @@ -110,7 +111,7 @@ func (o *CreateYandexStorageOK) readResponse(response runtime.ClientResponse, co o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateYandexStorageBadRequest) readResponse(response runtime.ClientResp o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateYandexStorageInternalServerError) readResponse(response runtime.C o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/create_zoho_storage_responses.go b/client/swagger/http/storage/create_zoho_storage_responses.go index 59516490..e32aaaba 100644 --- a/client/swagger/http/storage/create_zoho_storage_responses.go +++ b/client/swagger/http/storage/create_zoho_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type CreateZohoStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *CreateZohoStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *CreateZohoStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewCreateZohoStorageOK() @@ -110,7 +111,7 @@ func (o *CreateZohoStorageOK) readResponse(response runtime.ClientResponse, cons o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *CreateZohoStorageBadRequest) readResponse(response runtime.ClientRespon o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *CreateZohoStorageInternalServerError) readResponse(response runtime.Cli o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/explore_storage_responses.go b/client/swagger/http/storage/explore_storage_responses.go index 6e260a91..eb94d5e2 100644 --- a/client/swagger/http/storage/explore_storage_responses.go +++ b/client/swagger/http/storage/explore_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ExploreStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *ExploreStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ExploreStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewExploreStorageOK() @@ -108,7 +109,7 @@ func (o *ExploreStorageOK) GetPayload() []*models.StorageDirEntry { func (o *ExploreStorageOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *ExploreStorageBadRequest) readResponse(response runtime.ClientResponse, o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *ExploreStorageInternalServerError) readResponse(response runtime.Client o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/list_storages_responses.go b/client/swagger/http/storage/list_storages_responses.go index c2850ab8..434822d0 100644 --- a/client/swagger/http/storage/list_storages_responses.go +++ b/client/swagger/http/storage/list_storages_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ListStoragesReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *ListStoragesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ListStoragesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewListStoragesOK() @@ -108,7 +109,7 @@ func (o *ListStoragesOK) GetPayload() []*models.ModelStorage { func (o *ListStoragesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *ListStoragesBadRequest) readResponse(response runtime.ClientResponse, c o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *ListStoragesInternalServerError) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/remove_storage_responses.go b/client/swagger/http/storage/remove_storage_responses.go index 282b9db4..218f0a98 100644 --- a/client/swagger/http/storage/remove_storage_responses.go +++ b/client/swagger/http/storage/remove_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type RemoveStorageReader struct { } // ReadResponse reads a server response into the received o. -func (o *RemoveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *RemoveStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 204: result := NewRemoveStorageNoContent() @@ -166,7 +167,7 @@ func (o *RemoveStorageBadRequest) readResponse(response runtime.ClientResponse, o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -236,7 +237,7 @@ func (o *RemoveStorageInternalServerError) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/rename_storage_responses.go b/client/swagger/http/storage/rename_storage_responses.go index 39bd60da..8675a453 100644 --- a/client/swagger/http/storage/rename_storage_responses.go +++ b/client/swagger/http/storage/rename_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type RenameStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *RenameStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *RenameStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewRenameStorageOK() @@ -110,7 +111,7 @@ func (o *RenameStorageOK) readResponse(response runtime.ClientResponse, consumer o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *RenameStorageBadRequest) readResponse(response runtime.ClientResponse, o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *RenameStorageInternalServerError) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/storage/storage_client.go b/client/swagger/http/storage/storage_client.go index 1630003e..b7832da0 100644 --- a/client/swagger/http/storage/storage_client.go +++ b/client/swagger/http/storage/storage_client.go @@ -56,8 +56,6 @@ type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods type ClientService interface { - CreateAcdStorage(params *CreateAcdStorageParams, opts ...ClientOption) (*CreateAcdStorageOK, error) - CreateAzureblobStorage(params *CreateAzureblobStorageParams, opts ...ClientOption) (*CreateAzureblobStorageOK, error) CreateB2Storage(params *CreateB2StorageParams, opts ...ClientOption) (*CreateB2StorageOK, error) @@ -114,6 +112,8 @@ type ClientService interface { CreateOosUserPrincipalAuthStorage(params *CreateOosUserPrincipalAuthStorageParams, opts ...ClientOption) (*CreateOosUserPrincipalAuthStorageOK, error) + CreateOosWorkloadIdentityAuthStorage(params *CreateOosWorkloadIdentityAuthStorageParams, opts ...ClientOption) (*CreateOosWorkloadIdentityAuthStorageOK, error) + CreateOpendriveStorage(params *CreateOpendriveStorageParams, opts ...ClientOption) (*CreateOpendriveStorageOK, error) CreatePcloudStorage(params *CreatePcloudStorageParams, opts ...ClientOption) (*CreatePcloudStorageOK, error) @@ -140,6 +140,8 @@ type ClientService interface { CreateS3DreamhostStorage(params *CreateS3DreamhostStorageParams, opts ...ClientOption) (*CreateS3DreamhostStorageOK, error) + CreateS3GCSStorage(params *CreateS3GCSStorageParams, opts ...ClientOption) (*CreateS3GCSStorageOK, error) + CreateS3HuaweiOBSStorage(params *CreateS3HuaweiOBSStorageParams, opts ...ClientOption) (*CreateS3HuaweiOBSStorageOK, error) CreateS3IBMCOSStorage(params *CreateS3IBMCOSStorageParams, opts ...ClientOption) (*CreateS3IBMCOSStorageOK, error) @@ -148,20 +150,30 @@ type ClientService interface { CreateS3IONOSStorage(params *CreateS3IONOSStorageParams, opts ...ClientOption) (*CreateS3IONOSStorageOK, error) + CreateS3LeviiaStorage(params *CreateS3LeviiaStorageParams, opts ...ClientOption) (*CreateS3LeviiaStorageOK, error) + CreateS3LiaraStorage(params 
*CreateS3LiaraStorageParams, opts ...ClientOption) (*CreateS3LiaraStorageOK, error) + CreateS3LinodeStorage(params *CreateS3LinodeStorageParams, opts ...ClientOption) (*CreateS3LinodeStorageOK, error) + CreateS3LyveCloudStorage(params *CreateS3LyveCloudStorageParams, opts ...ClientOption) (*CreateS3LyveCloudStorageOK, error) + CreateS3MagaluStorage(params *CreateS3MagaluStorageParams, opts ...ClientOption) (*CreateS3MagaluStorageOK, error) + CreateS3MinioStorage(params *CreateS3MinioStorageParams, opts ...ClientOption) (*CreateS3MinioStorageOK, error) CreateS3NeteaseStorage(params *CreateS3NeteaseStorageParams, opts ...ClientOption) (*CreateS3NeteaseStorageOK, error) CreateS3OtherStorage(params *CreateS3OtherStorageParams, opts ...ClientOption) (*CreateS3OtherStorageOK, error) + CreateS3PetaboxStorage(params *CreateS3PetaboxStorageParams, opts ...ClientOption) (*CreateS3PetaboxStorageOK, error) + CreateS3QiniuStorage(params *CreateS3QiniuStorageParams, opts ...ClientOption) (*CreateS3QiniuStorageOK, error) CreateS3RackCorpStorage(params *CreateS3RackCorpStorageParams, opts ...ClientOption) (*CreateS3RackCorpStorageOK, error) + CreateS3RcloneStorage(params *CreateS3RcloneStorageParams, opts ...ClientOption) (*CreateS3RcloneStorageOK, error) + CreateS3ScalewayStorage(params *CreateS3ScalewayStorageParams, opts ...ClientOption) (*CreateS3ScalewayStorageOK, error) CreateS3SeaweedFSStorage(params *CreateS3SeaweedFSStorageParams, opts ...ClientOption) (*CreateS3SeaweedFSStorageOK, error) @@ -170,6 +182,8 @@ type ClientService interface { CreateS3StorjStorage(params *CreateS3StorjStorageParams, opts ...ClientOption) (*CreateS3StorjStorageOK, error) + CreateS3SynologyStorage(params *CreateS3SynologyStorageParams, opts ...ClientOption) (*CreateS3SynologyStorageOK, error) + CreateS3TencentCOSStorage(params *CreateS3TencentCOSStorageParams, opts ...ClientOption) (*CreateS3TencentCOSStorageOK, error) CreateS3WasabiStorage(params *CreateS3WasabiStorageParams, opts ...ClientOption) (*CreateS3WasabiStorageOK, error) @@ -215,49 +229,11 @@ type ClientService interface { SetTransport(transport runtime.ClientTransport) } -/* -CreateAcdStorage creates acd storage -*/ -func (a *Client) CreateAcdStorage(params *CreateAcdStorageParams, opts ...ClientOption) (*CreateAcdStorageOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewCreateAcdStorageParams() - } - op := &runtime.ClientOperation{ - ID: "CreateAcdStorage", - Method: "POST", - PathPattern: "/storage/acd", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &CreateAcdStorageReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*CreateAcdStorageOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateAcdStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - /* CreateAzureblobStorage creates azureblob storage */ func (a *Client) CreateAzureblobStorage(params *CreateAzureblobStorageParams, opts ...ClientOption) (*CreateAzureblobStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateAzureblobStorageParams() } @@ -276,17 +252,22 @@ func (a *Client) CreateAzureblobStorage(params *CreateAzureblobStorageParams, op for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateAzureblobStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateAzureblobStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -295,7 +276,7 @@ func (a *Client) CreateAzureblobStorage(params *CreateAzureblobStorageParams, op CreateB2Storage creates b2 storage */ func (a *Client) CreateB2Storage(params *CreateB2StorageParams, opts ...ClientOption) (*CreateB2StorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateB2StorageParams() } @@ -314,17 +295,22 @@ func (a *Client) CreateB2Storage(params *CreateB2StorageParams, opts ...ClientOp for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateB2StorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateB2Storage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -333,7 +319,7 @@ func (a *Client) CreateB2Storage(params *CreateB2StorageParams, opts ...ClientOp CreateBoxStorage creates box storage */ func (a *Client) CreateBoxStorage(params *CreateBoxStorageParams, opts ...ClientOption) (*CreateBoxStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateBoxStorageParams() } @@ -352,17 +338,22 @@ func (a *Client) CreateBoxStorage(params *CreateBoxStorageParams, opts ...Client for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateBoxStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateBoxStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -371,7 +362,7 @@ func (a *Client) CreateBoxStorage(params *CreateBoxStorageParams, opts ...Client CreateDriveStorage creates drive storage */ func (a *Client) CreateDriveStorage(params *CreateDriveStorageParams, opts ...ClientOption) (*CreateDriveStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateDriveStorageParams() } @@ -390,17 +381,22 @@ func (a *Client) CreateDriveStorage(params *CreateDriveStorageParams, opts ...Cl for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateDriveStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateDriveStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -409,7 +405,7 @@ func (a *Client) CreateDriveStorage(params *CreateDriveStorageParams, opts ...Cl CreateDropboxStorage creates dropbox storage */ func (a *Client) CreateDropboxStorage(params *CreateDropboxStorageParams, opts ...ClientOption) (*CreateDropboxStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateDropboxStorageParams() } @@ -428,17 +424,22 @@ func (a *Client) CreateDropboxStorage(params *CreateDropboxStorageParams, opts . 
for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateDropboxStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateDropboxStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -447,7 +448,7 @@ func (a *Client) CreateDropboxStorage(params *CreateDropboxStorageParams, opts . CreateFichierStorage creates fichier storage */ func (a *Client) CreateFichierStorage(params *CreateFichierStorageParams, opts ...ClientOption) (*CreateFichierStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateFichierStorageParams() } @@ -466,17 +467,22 @@ func (a *Client) CreateFichierStorage(params *CreateFichierStorageParams, opts . for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateFichierStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateFichierStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -485,7 +491,7 @@ func (a *Client) CreateFichierStorage(params *CreateFichierStorageParams, opts . CreateFilefabricStorage creates filefabric storage */ func (a *Client) CreateFilefabricStorage(params *CreateFilefabricStorageParams, opts ...ClientOption) (*CreateFilefabricStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateFilefabricStorageParams() } @@ -504,17 +510,22 @@ func (a *Client) CreateFilefabricStorage(params *CreateFilefabricStorageParams, for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateFilefabricStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateFilefabricStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -523,7 +534,7 @@ func (a *Client) CreateFilefabricStorage(params *CreateFilefabricStorageParams, CreateFtpStorage creates ftp storage */ func (a *Client) CreateFtpStorage(params *CreateFtpStorageParams, opts ...ClientOption) (*CreateFtpStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateFtpStorageParams() } @@ -542,17 +553,22 @@ func (a *Client) CreateFtpStorage(params *CreateFtpStorageParams, opts ...Client for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateFtpStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateFtpStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -561,7 +577,7 @@ func (a *Client) CreateFtpStorage(params *CreateFtpStorageParams, opts ...Client CreateGcsStorage creates gcs storage */ func (a *Client) CreateGcsStorage(params *CreateGcsStorageParams, opts ...ClientOption) (*CreateGcsStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateGcsStorageParams() } @@ -580,17 +596,22 @@ func (a *Client) CreateGcsStorage(params *CreateGcsStorageParams, opts ...Client for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateGcsStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateGcsStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -599,7 +620,7 @@ func (a *Client) CreateGcsStorage(params *CreateGcsStorageParams, opts ...Client CreateGphotosStorage creates gphotos storage */ func (a *Client) CreateGphotosStorage(params *CreateGphotosStorageParams, opts ...ClientOption) (*CreateGphotosStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateGphotosStorageParams() } @@ -618,17 +639,22 @@ func (a *Client) CreateGphotosStorage(params *CreateGphotosStorageParams, opts . 
for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateGphotosStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateGphotosStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -637,7 +663,7 @@ func (a *Client) CreateGphotosStorage(params *CreateGphotosStorageParams, opts . CreateHdfsStorage creates hdfs storage */ func (a *Client) CreateHdfsStorage(params *CreateHdfsStorageParams, opts ...ClientOption) (*CreateHdfsStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateHdfsStorageParams() } @@ -656,17 +682,22 @@ func (a *Client) CreateHdfsStorage(params *CreateHdfsStorageParams, opts ...Clie for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateHdfsStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateHdfsStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -675,7 +706,7 @@ func (a *Client) CreateHdfsStorage(params *CreateHdfsStorageParams, opts ...Clie CreateHidriveStorage creates hidrive storage */ func (a *Client) CreateHidriveStorage(params *CreateHidriveStorageParams, opts ...ClientOption) (*CreateHidriveStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateHidriveStorageParams() } @@ -694,17 +725,22 @@ func (a *Client) CreateHidriveStorage(params *CreateHidriveStorageParams, opts . for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateHidriveStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateHidriveStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -713,7 +749,7 @@ func (a *Client) CreateHidriveStorage(params *CreateHidriveStorageParams, opts . CreateHTTPStorage creates Http storage */ func (a *Client) CreateHTTPStorage(params *CreateHTTPStorageParams, opts ...ClientOption) (*CreateHTTPStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateHTTPStorageParams() } @@ -732,17 +768,22 @@ func (a *Client) CreateHTTPStorage(params *CreateHTTPStorageParams, opts ...Clie for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateHTTPStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateHttpStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -751,7 +792,7 @@ func (a *Client) CreateHTTPStorage(params *CreateHTTPStorageParams, opts ...Clie CreateInternetarchiveStorage creates internetarchive storage */ func (a *Client) CreateInternetarchiveStorage(params *CreateInternetarchiveStorageParams, opts ...ClientOption) (*CreateInternetarchiveStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateInternetarchiveStorageParams() } @@ -770,17 +811,22 @@ func (a *Client) CreateInternetarchiveStorage(params *CreateInternetarchiveStora for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateInternetarchiveStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateInternetarchiveStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -789,7 +835,7 @@ func (a *Client) CreateInternetarchiveStorage(params *CreateInternetarchiveStora CreateJottacloudStorage creates jottacloud storage */ func (a *Client) CreateJottacloudStorage(params *CreateJottacloudStorageParams, opts ...ClientOption) (*CreateJottacloudStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateJottacloudStorageParams() } @@ -808,17 +854,22 @@ func (a *Client) CreateJottacloudStorage(params *CreateJottacloudStorageParams, for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateJottacloudStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateJottacloudStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -827,7 +878,7 @@ func (a *Client) CreateJottacloudStorage(params *CreateJottacloudStorageParams, CreateKoofrDigistorageStorage creates koofr storage with digistorage digi storage https storage rcs rds ro */ func (a *Client) CreateKoofrDigistorageStorage(params *CreateKoofrDigistorageStorageParams, opts ...ClientOption) (*CreateKoofrDigistorageStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateKoofrDigistorageStorageParams() } @@ -846,17 +897,22 @@ func (a *Client) CreateKoofrDigistorageStorage(params *CreateKoofrDigistorageSto for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateKoofrDigistorageStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateKoofrDigistorageStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -865,7 +921,7 @@ func (a *Client) CreateKoofrDigistorageStorage(params *CreateKoofrDigistorageSto CreateKoofrKoofrStorage creates koofr storage with koofr koofr https app koofr net */ func (a *Client) CreateKoofrKoofrStorage(params *CreateKoofrKoofrStorageParams, opts ...ClientOption) (*CreateKoofrKoofrStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateKoofrKoofrStorageParams() } @@ -884,17 +940,22 @@ func (a *Client) CreateKoofrKoofrStorage(params *CreateKoofrKoofrStorageParams, for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateKoofrKoofrStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateKoofrKoofrStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -903,7 +964,7 @@ func (a *Client) CreateKoofrKoofrStorage(params *CreateKoofrKoofrStorageParams, CreateKoofrOtherStorage creates koofr storage with other any other koofr API compatible storage service */ func (a *Client) CreateKoofrOtherStorage(params *CreateKoofrOtherStorageParams, opts ...ClientOption) (*CreateKoofrOtherStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateKoofrOtherStorageParams() } @@ -922,17 +983,22 @@ func (a *Client) CreateKoofrOtherStorage(params *CreateKoofrOtherStorageParams, for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateKoofrOtherStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateKoofrOtherStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -941,7 +1007,7 @@ func (a *Client) CreateKoofrOtherStorage(params *CreateKoofrOtherStorageParams, CreateLocalStorage creates local storage */ func (a *Client) CreateLocalStorage(params *CreateLocalStorageParams, opts ...ClientOption) (*CreateLocalStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateLocalStorageParams() } @@ -960,17 +1026,22 @@ func (a *Client) CreateLocalStorage(params *CreateLocalStorageParams, opts ...Cl for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateLocalStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateLocalStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -979,7 +1050,7 @@ func (a *Client) CreateLocalStorage(params *CreateLocalStorageParams, opts ...Cl CreateMailruStorage creates mailru storage */ func (a *Client) CreateMailruStorage(params *CreateMailruStorageParams, opts ...ClientOption) (*CreateMailruStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateMailruStorageParams() } @@ -998,17 +1069,22 @@ func (a *Client) CreateMailruStorage(params *CreateMailruStorageParams, opts ... for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateMailruStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateMailruStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1017,7 +1093,7 @@ func (a *Client) CreateMailruStorage(params *CreateMailruStorageParams, opts ... 
CreateMegaStorage creates mega storage */ func (a *Client) CreateMegaStorage(params *CreateMegaStorageParams, opts ...ClientOption) (*CreateMegaStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateMegaStorageParams() } @@ -1036,17 +1112,22 @@ func (a *Client) CreateMegaStorage(params *CreateMegaStorageParams, opts ...Clie for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateMegaStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateMegaStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1055,7 +1136,7 @@ func (a *Client) CreateMegaStorage(params *CreateMegaStorageParams, opts ...Clie CreateNetstorageStorage creates netstorage storage */ func (a *Client) CreateNetstorageStorage(params *CreateNetstorageStorageParams, opts ...ClientOption) (*CreateNetstorageStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateNetstorageStorageParams() } @@ -1074,17 +1155,22 @@ func (a *Client) CreateNetstorageStorage(params *CreateNetstorageStorageParams, for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateNetstorageStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateNetstorageStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -1093,7 +1179,7 @@ func (a *Client) CreateNetstorageStorage(params *CreateNetstorageStorageParams, CreateOnedriveStorage creates onedrive storage */ func (a *Client) CreateOnedriveStorage(params *CreateOnedriveStorageParams, opts ...ClientOption) (*CreateOnedriveStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateOnedriveStorageParams() } @@ -1112,17 +1198,22 @@ func (a *Client) CreateOnedriveStorage(params *CreateOnedriveStorageParams, opts for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateOnedriveStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateOnedriveStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1131,7 +1222,7 @@ func (a *Client) CreateOnedriveStorage(params *CreateOnedriveStorageParams, opts CreateOosEnvAuthStorage creates oos storage with env auth automatically pickup the credentials from runtime env first one to provide auth wins */ func (a *Client) CreateOosEnvAuthStorage(params *CreateOosEnvAuthStorageParams, opts ...ClientOption) (*CreateOosEnvAuthStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateOosEnvAuthStorageParams() } @@ -1150,17 +1241,22 @@ func (a *Client) CreateOosEnvAuthStorage(params *CreateOosEnvAuthStorageParams, for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateOosEnvAuthStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateOosEnv_authStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -1169,7 +1265,7 @@ func (a *Client) CreateOosEnvAuthStorage(params *CreateOosEnvAuthStorageParams, CreateOosInstancePrincipalAuthStorage creates oos storage with instance principal auth use instance principals to authorize an instance to make API calls */ func (a *Client) CreateOosInstancePrincipalAuthStorage(params *CreateOosInstancePrincipalAuthStorageParams, opts ...ClientOption) (*CreateOosInstancePrincipalAuthStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateOosInstancePrincipalAuthStorageParams() } @@ -1188,17 +1284,22 @@ func (a *Client) CreateOosInstancePrincipalAuthStorage(params *CreateOosInstance for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateOosInstancePrincipalAuthStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateOosInstance_principal_authStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1207,7 +1308,7 @@ func (a *Client) CreateOosInstancePrincipalAuthStorage(params *CreateOosInstance CreateOosNoAuthStorage creates oos storage with no auth no credentials needed this is typically for reading public buckets */ func (a *Client) CreateOosNoAuthStorage(params *CreateOosNoAuthStorageParams, opts ...ClientOption) (*CreateOosNoAuthStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateOosNoAuthStorageParams() } @@ -1226,17 +1327,22 @@ func (a *Client) CreateOosNoAuthStorage(params *CreateOosNoAuthStorageParams, op for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateOosNoAuthStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateOosNo_authStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -1245,7 +1351,7 @@ func (a *Client) CreateOosNoAuthStorage(params *CreateOosNoAuthStorageParams, op CreateOosResourcePrincipalAuthStorage creates oos storage with resource principal auth use resource principals to make API calls */ func (a *Client) CreateOosResourcePrincipalAuthStorage(params *CreateOosResourcePrincipalAuthStorageParams, opts ...ClientOption) (*CreateOosResourcePrincipalAuthStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateOosResourcePrincipalAuthStorageParams() } @@ -1264,17 +1370,22 @@ func (a *Client) CreateOosResourcePrincipalAuthStorage(params *CreateOosResource for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateOosResourcePrincipalAuthStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateOosResource_principal_authStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1283,7 +1394,7 @@ func (a *Client) CreateOosResourcePrincipalAuthStorage(params *CreateOosResource CreateOosUserPrincipalAuthStorage creates oos storage with user principal auth use an o c i user and an API key for authentication */ func (a *Client) CreateOosUserPrincipalAuthStorage(params *CreateOosUserPrincipalAuthStorageParams, opts ...ClientOption) (*CreateOosUserPrincipalAuthStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateOosUserPrincipalAuthStorageParams() } @@ -1302,26 +1413,74 @@ func (a *Client) CreateOosUserPrincipalAuthStorage(params *CreateOosUserPrincipa for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateOosUserPrincipalAuthStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateOosUser_principal_authStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } +/* +CreateOosWorkloadIdentityAuthStorage creates oos storage with workload identity auth use workload identity to grant o c i container engine for kubernetes workloads policy driven access to o c i resources using o c i identity and access management i a m +*/ +func (a *Client) CreateOosWorkloadIdentityAuthStorage(params *CreateOosWorkloadIdentityAuthStorageParams, opts ...ClientOption) (*CreateOosWorkloadIdentityAuthStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateOosWorkloadIdentityAuthStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateOosWorkload_identity_authStorage", + Method: "POST", + PathPattern: "/storage/oos/workload_identity_auth", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateOosWorkloadIdentityAuthStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateOosWorkloadIdentityAuthStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateOosWorkload_identity_authStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateOpendriveStorage creates opendrive storage */ func (a *Client) CreateOpendriveStorage(params *CreateOpendriveStorageParams, opts ...ClientOption) (*CreateOpendriveStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateOpendriveStorageParams() } @@ -1340,17 +1499,22 @@ func (a *Client) CreateOpendriveStorage(params *CreateOpendriveStorageParams, op for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateOpendriveStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateOpendriveStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -1359,7 +1523,7 @@ func (a *Client) CreateOpendriveStorage(params *CreateOpendriveStorageParams, op CreatePcloudStorage creates pcloud storage */ func (a *Client) CreatePcloudStorage(params *CreatePcloudStorageParams, opts ...ClientOption) (*CreatePcloudStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreatePcloudStorageParams() } @@ -1378,17 +1542,22 @@ func (a *Client) CreatePcloudStorage(params *CreatePcloudStorageParams, opts ... for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreatePcloudStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreatePcloudStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1397,7 +1566,7 @@ func (a *Client) CreatePcloudStorage(params *CreatePcloudStorageParams, opts ... CreatePremiumizemeStorage creates premiumizeme storage */ func (a *Client) CreatePremiumizemeStorage(params *CreatePremiumizemeStorageParams, opts ...ClientOption) (*CreatePremiumizemeStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreatePremiumizemeStorageParams() } @@ -1416,17 +1585,22 @@ func (a *Client) CreatePremiumizemeStorage(params *CreatePremiumizemeStoragePara for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreatePremiumizemeStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreatePremiumizemeStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -1435,7 +1609,7 @@ func (a *Client) CreatePremiumizemeStorage(params *CreatePremiumizemeStoragePara CreatePutioStorage creates putio storage */ func (a *Client) CreatePutioStorage(params *CreatePutioStorageParams, opts ...ClientOption) (*CreatePutioStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreatePutioStorageParams() } @@ -1454,17 +1628,22 @@ func (a *Client) CreatePutioStorage(params *CreatePutioStorageParams, opts ...Cl for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreatePutioStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreatePutioStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1473,7 +1652,7 @@ func (a *Client) CreatePutioStorage(params *CreatePutioStorageParams, opts ...Cl CreateQingstorStorage creates qingstor storage */ func (a *Client) CreateQingstorStorage(params *CreateQingstorStorageParams, opts ...ClientOption) (*CreateQingstorStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateQingstorStorageParams() } @@ -1492,17 +1671,22 @@ func (a *Client) CreateQingstorStorage(params *CreateQingstorStorageParams, opts for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateQingstorStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateQingstorStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -1511,7 +1695,7 @@ func (a *Client) CreateQingstorStorage(params *CreateQingstorStorageParams, opts CreateS3AWSStorage creates s3 storage with a w s amazon web services a w s s3 */ func (a *Client) CreateS3AWSStorage(params *CreateS3AWSStorageParams, opts ...ClientOption) (*CreateS3AWSStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3AWSStorageParams() } @@ -1530,17 +1714,22 @@ func (a *Client) CreateS3AWSStorage(params *CreateS3AWSStorageParams, opts ...Cl for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3AWSStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3AWSStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1549,7 +1738,7 @@ func (a *Client) CreateS3AWSStorage(params *CreateS3AWSStorageParams, opts ...Cl CreateS3AlibabaStorage creates s3 storage with alibaba alibaba cloud object storage system o s s formerly aliyun */ func (a *Client) CreateS3AlibabaStorage(params *CreateS3AlibabaStorageParams, opts ...ClientOption) (*CreateS3AlibabaStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3AlibabaStorageParams() } @@ -1568,17 +1757,22 @@ func (a *Client) CreateS3AlibabaStorage(params *CreateS3AlibabaStorageParams, op for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3AlibabaStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3AlibabaStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -1587,7 +1781,7 @@ func (a *Client) CreateS3AlibabaStorage(params *CreateS3AlibabaStorageParams, op CreateS3ArvanCloudStorage creates s3 storage with arvan cloud arvan cloud object storage a o s */ func (a *Client) CreateS3ArvanCloudStorage(params *CreateS3ArvanCloudStorageParams, opts ...ClientOption) (*CreateS3ArvanCloudStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3ArvanCloudStorageParams() } @@ -1606,17 +1800,22 @@ func (a *Client) CreateS3ArvanCloudStorage(params *CreateS3ArvanCloudStoragePara for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3ArvanCloudStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3ArvanCloudStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1625,7 +1824,7 @@ func (a *Client) CreateS3ArvanCloudStorage(params *CreateS3ArvanCloudStoragePara CreateS3CephStorage creates s3 storage with ceph ceph object storage */ func (a *Client) CreateS3CephStorage(params *CreateS3CephStorageParams, opts ...ClientOption) (*CreateS3CephStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3CephStorageParams() } @@ -1644,17 +1843,22 @@ func (a *Client) CreateS3CephStorage(params *CreateS3CephStorageParams, opts ... for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3CephStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3CephStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1663,7 +1867,7 @@ func (a *Client) CreateS3CephStorage(params *CreateS3CephStorageParams, opts ... 
CreateS3ChinaMobileStorage creates s3 storage with china mobile china mobile ecloud elastic object storage e o s */ func (a *Client) CreateS3ChinaMobileStorage(params *CreateS3ChinaMobileStorageParams, opts ...ClientOption) (*CreateS3ChinaMobileStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3ChinaMobileStorageParams() } @@ -1682,17 +1886,22 @@ func (a *Client) CreateS3ChinaMobileStorage(params *CreateS3ChinaMobileStoragePa for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3ChinaMobileStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3ChinaMobileStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1701,7 +1910,7 @@ func (a *Client) CreateS3ChinaMobileStorage(params *CreateS3ChinaMobileStoragePa CreateS3CloudflareStorage creates s3 storage with cloudflare cloudflare r2 storage */ func (a *Client) CreateS3CloudflareStorage(params *CreateS3CloudflareStorageParams, opts ...ClientOption) (*CreateS3CloudflareStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3CloudflareStorageParams() } @@ -1720,17 +1929,22 @@ func (a *Client) CreateS3CloudflareStorage(params *CreateS3CloudflareStoragePara for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3CloudflareStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3CloudflareStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -1739,7 +1953,7 @@ func (a *Client) CreateS3CloudflareStorage(params *CreateS3CloudflareStoragePara CreateS3DigitalOceanStorage creates s3 storage with digital ocean digital ocean spaces */ func (a *Client) CreateS3DigitalOceanStorage(params *CreateS3DigitalOceanStorageParams, opts ...ClientOption) (*CreateS3DigitalOceanStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3DigitalOceanStorageParams() } @@ -1758,17 +1972,22 @@ func (a *Client) CreateS3DigitalOceanStorage(params *CreateS3DigitalOceanStorage for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3DigitalOceanStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3DigitalOceanStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1777,7 +1996,7 @@ func (a *Client) CreateS3DigitalOceanStorage(params *CreateS3DigitalOceanStorage CreateS3DreamhostStorage creates s3 storage with dreamhost dreamhost dream objects */ func (a *Client) CreateS3DreamhostStorage(params *CreateS3DreamhostStorageParams, opts ...ClientOption) (*CreateS3DreamhostStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3DreamhostStorageParams() } @@ -1796,93 +2015,151 @@ func (a *Client) CreateS3DreamhostStorage(params *CreateS3DreamhostStorageParams for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3DreamhostStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3DreamhostStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3HuaweiOBSStorage creates s3 storage with huawei o b s huawei object storage service +CreateS3GCSStorage creates s3 storage with g c s google cloud storage */ -func (a *Client) CreateS3HuaweiOBSStorage(params *CreateS3HuaweiOBSStorageParams, opts ...ClientOption) (*CreateS3HuaweiOBSStorageOK, error) { - // TODO: Validate the params before sending +func (a *Client) CreateS3GCSStorage(params *CreateS3GCSStorageParams, opts ...ClientOption) (*CreateS3GCSStorageOK, error) { + // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3HuaweiOBSStorageParams() + params = NewCreateS3GCSStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3HuaweiOBSStorage", + ID: "CreateS3GCSStorage", Method: "POST", - PathPattern: "/storage/s3/huaweiobs", + PathPattern: "/storage/s3/gcs", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3HuaweiOBSStorageReader{formats: a.formats}, + Reader: &CreateS3GCSStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } - success, ok := result.(*CreateS3HuaweiOBSStorageOK) + + // only one success response has to be checked + success, ok := result.(*CreateS3GCSStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for CreateS3HuaweiOBSStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3GCSStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } /* -CreateS3IBMCOSStorage creates s3 storage with i b m c o s i b m c o s s3 +CreateS3HuaweiOBSStorage creates s3 storage with huawei o b s huawei object storage service */ -func (a *Client) CreateS3IBMCOSStorage(params *CreateS3IBMCOSStorageParams, opts ...ClientOption) (*CreateS3IBMCOSStorageOK, error) { - // TODO: Validate the params before sending +func (a *Client) CreateS3HuaweiOBSStorage(params *CreateS3HuaweiOBSStorageParams, opts ...ClientOption) (*CreateS3HuaweiOBSStorageOK, error) { + // NOTE: parameters are not validated before sending if params == nil { - params = NewCreateS3IBMCOSStorageParams() + params = NewCreateS3HuaweiOBSStorageParams() } op := &runtime.ClientOperation{ - ID: "CreateS3IBMCOSStorage", + ID: "CreateS3HuaweiOBSStorage", Method: "POST", - PathPattern: "/storage/s3/ibmcos", + PathPattern: "/storage/s3/huaweiobs", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, - Reader: &CreateS3IBMCOSStorageReader{formats: a.formats}, + Reader: &CreateS3HuaweiOBSStorageReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, } for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } - success, ok := result.(*CreateS3IBMCOSStorageOK) - if ok { + + // only one success response has to be checked + success, ok := result.(*CreateS3HuaweiOBSStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3HuaweiOBSStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateS3IBMCOSStorage creates s3 storage with i b m c o s i b m c o s s3 +*/ +func (a *Client) CreateS3IBMCOSStorage(params *CreateS3IBMCOSStorageParams, opts ...ClientOption) (*CreateS3IBMCOSStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3IBMCOSStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3IBMCOSStorage", + Method: "POST", + PathPattern: "/storage/s3/ibmcos", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3IBMCOSStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3IBMCOSStorageOK) + if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3IBMCOSStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -1891,7 +2168,7 @@ func (a *Client) CreateS3IBMCOSStorage(params *CreateS3IBMCOSStorageParams, opts CreateS3IDriveStorage creates s3 storage with i drive i drive e2 */ func (a *Client) CreateS3IDriveStorage(params *CreateS3IDriveStorageParams, opts ...ClientOption) (*CreateS3IDriveStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3IDriveStorageParams() } @@ -1910,17 +2187,22 @@ func (a *Client) CreateS3IDriveStorage(params *CreateS3IDriveStorageParams, opts for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3IDriveStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3IDriveStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -1929,7 +2211,7 @@ func (a *Client) CreateS3IDriveStorage(params *CreateS3IDriveStorageParams, opts CreateS3IONOSStorage creates s3 storage with i o n o s i o n o s cloud */ func (a *Client) CreateS3IONOSStorage(params *CreateS3IONOSStorageParams, opts ...ClientOption) (*CreateS3IONOSStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3IONOSStorageParams() } @@ -1948,26 +2230,74 @@ func (a *Client) CreateS3IONOSStorage(params *CreateS3IONOSStorageParams, opts . for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3IONOSStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3IONOSStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } +/* +CreateS3LeviiaStorage creates s3 storage with leviia leviia object storage +*/ +func (a *Client) CreateS3LeviiaStorage(params *CreateS3LeviiaStorageParams, opts ...ClientOption) (*CreateS3LeviiaStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3LeviiaStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3LeviiaStorage", + Method: "POST", + PathPattern: "/storage/s3/leviia", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3LeviiaStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3LeviiaStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3LeviiaStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateS3LiaraStorage creates s3 storage with liara liara object storage */ func (a *Client) CreateS3LiaraStorage(params *CreateS3LiaraStorageParams, opts ...ClientOption) (*CreateS3LiaraStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3LiaraStorageParams() } @@ -1986,26 +2316,74 @@ func (a *Client) CreateS3LiaraStorage(params *CreateS3LiaraStorageParams, opts . for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3LiaraStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3LiaraStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } +/* +CreateS3LinodeStorage creates s3 storage with linode linode object storage +*/ +func (a *Client) CreateS3LinodeStorage(params *CreateS3LinodeStorageParams, opts ...ClientOption) (*CreateS3LinodeStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3LinodeStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3LinodeStorage", + Method: "POST", + PathPattern: "/storage/s3/linode", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3LinodeStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3LinodeStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3LinodeStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateS3LyveCloudStorage creates s3 storage with lyve cloud seagate lyve cloud */ func (a *Client) CreateS3LyveCloudStorage(params *CreateS3LyveCloudStorageParams, opts ...ClientOption) (*CreateS3LyveCloudStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3LyveCloudStorageParams() } @@ -2024,26 +2402,74 @@ func (a *Client) CreateS3LyveCloudStorage(params *CreateS3LyveCloudStorageParams for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3LyveCloudStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3LyveCloudStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } +/* +CreateS3MagaluStorage creates s3 storage with magalu magalu object storage +*/ +func (a *Client) CreateS3MagaluStorage(params *CreateS3MagaluStorageParams, opts ...ClientOption) (*CreateS3MagaluStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3MagaluStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3MagaluStorage", + Method: "POST", + PathPattern: "/storage/s3/magalu", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3MagaluStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3MagaluStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3MagaluStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateS3MinioStorage creates s3 storage with minio minio object storage */ func (a *Client) CreateS3MinioStorage(params *CreateS3MinioStorageParams, opts ...ClientOption) (*CreateS3MinioStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3MinioStorageParams() } @@ -2062,17 +2488,22 @@ func (a *Client) CreateS3MinioStorage(params *CreateS3MinioStorageParams, opts . for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3MinioStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3MinioStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2081,7 +2512,7 @@ func (a *Client) CreateS3MinioStorage(params *CreateS3MinioStorageParams, opts . 
CreateS3NeteaseStorage creates s3 storage with netease netease object storage n o s */ func (a *Client) CreateS3NeteaseStorage(params *CreateS3NeteaseStorageParams, opts ...ClientOption) (*CreateS3NeteaseStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3NeteaseStorageParams() } @@ -2100,17 +2531,22 @@ func (a *Client) CreateS3NeteaseStorage(params *CreateS3NeteaseStorageParams, op for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3NeteaseStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3NeteaseStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2119,7 +2555,7 @@ func (a *Client) CreateS3NeteaseStorage(params *CreateS3NeteaseStorageParams, op CreateS3OtherStorage creates s3 storage with other any other s3 compatible provider */ func (a *Client) CreateS3OtherStorage(params *CreateS3OtherStorageParams, opts ...ClientOption) (*CreateS3OtherStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3OtherStorageParams() } @@ -2138,26 +2574,74 @@ func (a *Client) CreateS3OtherStorage(params *CreateS3OtherStorageParams, opts . for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3OtherStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3OtherStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } +/* +CreateS3PetaboxStorage creates s3 storage with petabox petabox object storage +*/ +func (a *Client) CreateS3PetaboxStorage(params *CreateS3PetaboxStorageParams, opts ...ClientOption) (*CreateS3PetaboxStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3PetaboxStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3PetaboxStorage", + Method: "POST", + PathPattern: "/storage/s3/petabox", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3PetaboxStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3PetaboxStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3PetaboxStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateS3QiniuStorage creates s3 storage with qiniu qiniu object storage kodo */ func (a *Client) CreateS3QiniuStorage(params *CreateS3QiniuStorageParams, opts ...ClientOption) (*CreateS3QiniuStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3QiniuStorageParams() } @@ -2176,17 +2660,22 @@ func (a *Client) CreateS3QiniuStorage(params *CreateS3QiniuStorageParams, opts . for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3QiniuStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3QiniuStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2195,7 +2684,7 @@ func (a *Client) CreateS3QiniuStorage(params *CreateS3QiniuStorageParams, opts . 
CreateS3RackCorpStorage creates s3 storage with rack corp rack corp object storage */ func (a *Client) CreateS3RackCorpStorage(params *CreateS3RackCorpStorageParams, opts ...ClientOption) (*CreateS3RackCorpStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3RackCorpStorageParams() } @@ -2214,26 +2703,74 @@ func (a *Client) CreateS3RackCorpStorage(params *CreateS3RackCorpStorageParams, for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3RackCorpStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3RackCorpStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } +/* +CreateS3RcloneStorage creates s3 storage with rclone rclone s3 server +*/ +func (a *Client) CreateS3RcloneStorage(params *CreateS3RcloneStorageParams, opts ...ClientOption) (*CreateS3RcloneStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3RcloneStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3RcloneStorage", + Method: "POST", + PathPattern: "/storage/s3/rclone", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3RcloneStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3RcloneStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3RcloneStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateS3ScalewayStorage creates s3 storage with scaleway scaleway object storage */ func (a *Client) CreateS3ScalewayStorage(params *CreateS3ScalewayStorageParams, opts ...ClientOption) (*CreateS3ScalewayStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3ScalewayStorageParams() } @@ -2252,17 +2789,22 @@ func (a *Client) CreateS3ScalewayStorage(params *CreateS3ScalewayStorageParams, for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3ScalewayStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3ScalewayStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2271,7 +2813,7 @@ func (a *Client) CreateS3ScalewayStorage(params *CreateS3ScalewayStorageParams, CreateS3SeaweedFSStorage creates s3 storage with seaweed f s seaweed f s s3 */ func (a *Client) CreateS3SeaweedFSStorage(params *CreateS3SeaweedFSStorageParams, opts ...ClientOption) (*CreateS3SeaweedFSStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3SeaweedFSStorageParams() } @@ -2290,17 +2832,22 @@ func (a *Client) CreateS3SeaweedFSStorage(params *CreateS3SeaweedFSStorageParams for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3SeaweedFSStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3SeaweedFSStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -2309,7 +2856,7 @@ func (a *Client) CreateS3SeaweedFSStorage(params *CreateS3SeaweedFSStorageParams CreateS3StackPathStorage creates s3 storage with stack path stack path object storage */ func (a *Client) CreateS3StackPathStorage(params *CreateS3StackPathStorageParams, opts ...ClientOption) (*CreateS3StackPathStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3StackPathStorageParams() } @@ -2328,17 +2875,22 @@ func (a *Client) CreateS3StackPathStorage(params *CreateS3StackPathStorageParams for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3StackPathStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3StackPathStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2347,7 +2899,7 @@ func (a *Client) CreateS3StackPathStorage(params *CreateS3StackPathStorageParams CreateS3StorjStorage creates s3 storage with storj storj s3 compatible gateway */ func (a *Client) CreateS3StorjStorage(params *CreateS3StorjStorageParams, opts ...ClientOption) (*CreateS3StorjStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3StorjStorageParams() } @@ -2366,26 +2918,74 @@ func (a *Client) CreateS3StorjStorage(params *CreateS3StorjStorageParams, opts . for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3StorjStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3StorjStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } +/* +CreateS3SynologyStorage creates s3 storage with synology synology c2 object storage +*/ +func (a *Client) CreateS3SynologyStorage(params *CreateS3SynologyStorageParams, opts ...ClientOption) (*CreateS3SynologyStorageOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateS3SynologyStorageParams() + } + op := &runtime.ClientOperation{ + ID: "CreateS3SynologyStorage", + Method: "POST", + PathPattern: "/storage/s3/synology", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateS3SynologyStorageReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateS3SynologyStorageOK) + if ok { + return success, nil + } + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for CreateS3SynologyStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* CreateS3TencentCOSStorage creates s3 storage with tencent c o s tencent cloud object storage c o s */ func (a *Client) CreateS3TencentCOSStorage(params *CreateS3TencentCOSStorageParams, opts ...ClientOption) (*CreateS3TencentCOSStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3TencentCOSStorageParams() } @@ -2404,17 +3004,22 @@ func (a *Client) CreateS3TencentCOSStorage(params *CreateS3TencentCOSStoragePara for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3TencentCOSStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3TencentCOSStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -2423,7 +3028,7 @@ func (a *Client) CreateS3TencentCOSStorage(params *CreateS3TencentCOSStoragePara CreateS3WasabiStorage creates s3 storage with wasabi wasabi object storage */ func (a *Client) CreateS3WasabiStorage(params *CreateS3WasabiStorageParams, opts ...ClientOption) (*CreateS3WasabiStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateS3WasabiStorageParams() } @@ -2442,17 +3047,22 @@ func (a *Client) CreateS3WasabiStorage(params *CreateS3WasabiStorageParams, opts for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateS3WasabiStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateS3WasabiStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2461,7 +3071,7 @@ func (a *Client) CreateS3WasabiStorage(params *CreateS3WasabiStorageParams, opts CreateSeafileStorage creates seafile storage */ func (a *Client) CreateSeafileStorage(params *CreateSeafileStorageParams, opts ...ClientOption) (*CreateSeafileStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateSeafileStorageParams() } @@ -2480,17 +3090,22 @@ func (a *Client) CreateSeafileStorage(params *CreateSeafileStorageParams, opts . for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateSeafileStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateSeafileStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2499,7 +3114,7 @@ func (a *Client) CreateSeafileStorage(params *CreateSeafileStorageParams, opts . 
CreateSftpStorage creates sftp storage */ func (a *Client) CreateSftpStorage(params *CreateSftpStorageParams, opts ...ClientOption) (*CreateSftpStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateSftpStorageParams() } @@ -2518,17 +3133,22 @@ func (a *Client) CreateSftpStorage(params *CreateSftpStorageParams, opts ...Clie for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateSftpStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateSftpStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2537,7 +3157,7 @@ func (a *Client) CreateSftpStorage(params *CreateSftpStorageParams, opts ...Clie CreateSharefileStorage creates sharefile storage */ func (a *Client) CreateSharefileStorage(params *CreateSharefileStorageParams, opts ...ClientOption) (*CreateSharefileStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateSharefileStorageParams() } @@ -2556,17 +3176,22 @@ func (a *Client) CreateSharefileStorage(params *CreateSharefileStorageParams, op for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateSharefileStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateSharefileStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2575,7 +3200,7 @@ func (a *Client) CreateSharefileStorage(params *CreateSharefileStorageParams, op CreateSiaStorage creates sia storage */ func (a *Client) CreateSiaStorage(params *CreateSiaStorageParams, opts ...ClientOption) (*CreateSiaStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateSiaStorageParams() } @@ -2594,17 +3219,22 @@ func (a *Client) CreateSiaStorage(params *CreateSiaStorageParams, opts ...Client for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateSiaStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. 
+ + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateSiaStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2613,7 +3243,7 @@ func (a *Client) CreateSiaStorage(params *CreateSiaStorageParams, opts ...Client CreateSmbStorage creates smb storage */ func (a *Client) CreateSmbStorage(params *CreateSmbStorageParams, opts ...ClientOption) (*CreateSmbStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateSmbStorageParams() } @@ -2632,17 +3262,22 @@ func (a *Client) CreateSmbStorage(params *CreateSmbStorageParams, opts ...Client for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateSmbStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateSmbStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2651,7 +3286,7 @@ func (a *Client) CreateSmbStorage(params *CreateSmbStorageParams, opts ...Client CreateStorjExistingStorage creates storj storage with existing use an existing access grant */ func (a *Client) CreateStorjExistingStorage(params *CreateStorjExistingStorageParams, opts ...ClientOption) (*CreateStorjExistingStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateStorjExistingStorageParams() } @@ -2670,17 +3305,22 @@ func (a *Client) CreateStorjExistingStorage(params *CreateStorjExistingStoragePa for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateStorjExistingStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateStorjExistingStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -2689,7 +3329,7 @@ func (a *Client) CreateStorjExistingStorage(params *CreateStorjExistingStoragePa CreateStorjNewStorage creates storj storage with new create a new access grant from satellite address API key and passphrase */ func (a *Client) CreateStorjNewStorage(params *CreateStorjNewStorageParams, opts ...ClientOption) (*CreateStorjNewStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateStorjNewStorageParams() } @@ -2708,17 +3348,22 @@ func (a *Client) CreateStorjNewStorage(params *CreateStorjNewStorageParams, opts for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateStorjNewStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateStorjNewStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2727,7 +3372,7 @@ func (a *Client) CreateStorjNewStorage(params *CreateStorjNewStorageParams, opts CreateSugarsyncStorage creates sugarsync storage */ func (a *Client) CreateSugarsyncStorage(params *CreateSugarsyncStorageParams, opts ...ClientOption) (*CreateSugarsyncStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateSugarsyncStorageParams() } @@ -2746,17 +3391,22 @@ func (a *Client) CreateSugarsyncStorage(params *CreateSugarsyncStorageParams, op for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateSugarsyncStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateSugarsyncStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -2765,7 +3415,7 @@ func (a *Client) CreateSugarsyncStorage(params *CreateSugarsyncStorageParams, op CreateSwiftStorage creates swift storage */ func (a *Client) CreateSwiftStorage(params *CreateSwiftStorageParams, opts ...ClientOption) (*CreateSwiftStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateSwiftStorageParams() } @@ -2784,17 +3434,22 @@ func (a *Client) CreateSwiftStorage(params *CreateSwiftStorageParams, opts ...Cl for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateSwiftStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateSwiftStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2803,7 +3458,7 @@ func (a *Client) CreateSwiftStorage(params *CreateSwiftStorageParams, opts ...Cl CreateUnionStorage creates union storage */ func (a *Client) CreateUnionStorage(params *CreateUnionStorageParams, opts ...ClientOption) (*CreateUnionStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateUnionStorageParams() } @@ -2822,17 +3477,22 @@ func (a *Client) CreateUnionStorage(params *CreateUnionStorageParams, opts ...Cl for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateUnionStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateUnionStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2841,7 +3501,7 @@ func (a *Client) CreateUnionStorage(params *CreateUnionStorageParams, opts ...Cl CreateUptoboxStorage creates uptobox storage */ func (a *Client) CreateUptoboxStorage(params *CreateUptoboxStorageParams, opts ...ClientOption) (*CreateUptoboxStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateUptoboxStorageParams() } @@ -2860,17 +3520,22 @@ func (a *Client) CreateUptoboxStorage(params *CreateUptoboxStorageParams, opts . 
for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateUptoboxStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateUptoboxStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2879,7 +3544,7 @@ func (a *Client) CreateUptoboxStorage(params *CreateUptoboxStorageParams, opts . CreateWebdavStorage creates webdav storage */ func (a *Client) CreateWebdavStorage(params *CreateWebdavStorageParams, opts ...ClientOption) (*CreateWebdavStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateWebdavStorageParams() } @@ -2898,17 +3563,22 @@ func (a *Client) CreateWebdavStorage(params *CreateWebdavStorageParams, opts ... for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateWebdavStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateWebdavStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2917,7 +3587,7 @@ func (a *Client) CreateWebdavStorage(params *CreateWebdavStorageParams, opts ... CreateYandexStorage creates yandex storage */ func (a *Client) CreateYandexStorage(params *CreateYandexStorageParams, opts ...ClientOption) (*CreateYandexStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateYandexStorageParams() } @@ -2936,17 +3606,22 @@ func (a *Client) CreateYandexStorage(params *CreateYandexStorageParams, opts ... for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateYandexStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateYandexStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -2955,7 +3630,7 @@ func (a *Client) CreateYandexStorage(params *CreateYandexStorageParams, opts ... CreateZohoStorage creates zoho storage */ func (a *Client) CreateZohoStorage(params *CreateZohoStorageParams, opts ...ClientOption) (*CreateZohoStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewCreateZohoStorageParams() } @@ -2974,17 +3649,22 @@ func (a *Client) CreateZohoStorage(params *CreateZohoStorageParams, opts ...Clie for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*CreateZohoStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for CreateZohoStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -2993,7 +3673,7 @@ func (a *Client) CreateZohoStorage(params *CreateZohoStorageParams, opts ...Clie ExploreStorage explores directory entries in a storage system */ func (a *Client) ExploreStorage(params *ExploreStorageParams, opts ...ClientOption) (*ExploreStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewExploreStorageParams() } @@ -3012,17 +3692,22 @@ func (a *Client) ExploreStorage(params *ExploreStorageParams, opts ...ClientOpti for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ExploreStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ExploreStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -3031,7 +3716,7 @@ func (a *Client) ExploreStorage(params *ExploreStorageParams, opts ...ClientOpti ListStorages lists all storages */ func (a *Client) ListStorages(params *ListStoragesParams, opts ...ClientOption) (*ListStoragesOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewListStoragesParams() } @@ -3050,17 +3735,22 @@ func (a *Client) ListStorages(params *ListStoragesParams, opts ...ClientOption) for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ListStoragesOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ListStorages: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -3069,7 +3759,7 @@ func (a *Client) ListStorages(params *ListStoragesParams, opts ...ClientOption) RemoveStorage removes a storage */ func (a *Client) RemoveStorage(params *RemoveStorageParams, opts ...ClientOption) (*RemoveStorageNoContent, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewRemoveStorageParams() } @@ -3088,17 +3778,22 @@ func (a *Client) RemoveStorage(params *RemoveStorageParams, opts ...ClientOption for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*RemoveStorageNoContent) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for RemoveStorage: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -3107,7 +3802,7 @@ func (a *Client) RemoveStorage(params *RemoveStorageParams, opts ...ClientOption RenameStorage renames a storage connection */ func (a *Client) RenameStorage(params *RenameStorageParams, opts ...ClientOption) (*RenameStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewRenameStorageParams() } @@ -3126,17 +3821,22 @@ func (a *Client) RenameStorage(params *RenameStorageParams, opts ...ClientOption for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*RenameStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for RenameStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -3145,7 +3845,7 @@ func (a *Client) RenameStorage(params *RenameStorageParams, opts ...ClientOption UpdateStorage updates a storage connection */ func (a *Client) UpdateStorage(params *UpdateStorageParams, opts ...ClientOption) (*UpdateStorageOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewUpdateStorageParams() } @@ -3164,17 +3864,22 @@ func (a *Client) UpdateStorage(params *UpdateStorageParams, opts ...ClientOption for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*UpdateStorageOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for UpdateStorage: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } diff --git a/client/swagger/http/storage/update_storage_responses.go b/client/swagger/http/storage/update_storage_responses.go index aa2e08ea..5a0e21e2 100644 --- a/client/swagger/http/storage/update_storage_responses.go +++ b/client/swagger/http/storage/update_storage_responses.go @@ -7,6 +7,7 @@ package storage import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type UpdateStorageReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *UpdateStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *UpdateStorageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewUpdateStorageOK() @@ -110,7 +111,7 @@ func (o *UpdateStorageOK) readResponse(response runtime.ClientResponse, consumer o.Payload = new(models.ModelStorage) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *UpdateStorageBadRequest) readResponse(response runtime.ClientResponse, o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *UpdateStorageInternalServerError) readResponse(response runtime.ClientR o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/wallet/import_wallet_responses.go b/client/swagger/http/wallet/import_wallet_responses.go index 11551cfb..9beb48e1 100644 --- a/client/swagger/http/wallet/import_wallet_responses.go +++ b/client/swagger/http/wallet/import_wallet_responses.go @@ -7,6 +7,7 @@ package wallet import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ImportWalletReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *ImportWalletReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ImportWalletReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewImportWalletOK() @@ -110,7 +111,7 @@ func (o *ImportWalletOK) readResponse(response runtime.ClientResponse, consumer o.Payload = new(models.ModelWallet) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *ImportWalletBadRequest) readResponse(response runtime.ClientResponse, c o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *ImportWalletInternalServerError) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/wallet/list_wallets_responses.go b/client/swagger/http/wallet/list_wallets_responses.go index abc1cd12..5cce90a9 100644 --- a/client/swagger/http/wallet/list_wallets_responses.go +++ b/client/swagger/http/wallet/list_wallets_responses.go @@ -7,6 +7,7 @@ package wallet import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ListWalletsReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *ListWalletsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ListWalletsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewListWalletsOK() @@ -108,7 +109,7 @@ func (o *ListWalletsOK) GetPayload() []*models.ModelWallet { func (o *ListWalletsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *ListWalletsBadRequest) readResponse(response runtime.ClientResponse, co o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *ListWalletsInternalServerError) readResponse(response runtime.ClientRes o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/wallet/remove_wallet_responses.go b/client/swagger/http/wallet/remove_wallet_responses.go index 0ae445ae..e897687e 100644 --- a/client/swagger/http/wallet/remove_wallet_responses.go +++ b/client/swagger/http/wallet/remove_wallet_responses.go @@ -7,6 +7,7 @@ package wallet import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type RemoveWalletReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *RemoveWalletReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *RemoveWalletReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 204: result := NewRemoveWalletNoContent() @@ -166,7 +167,7 @@ func (o *RemoveWalletBadRequest) readResponse(response runtime.ClientResponse, c o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -236,7 +237,7 @@ func (o *RemoveWalletInternalServerError) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/wallet/wallet_client.go b/client/swagger/http/wallet/wallet_client.go index 598b3d31..9c6d42dc 100644 --- a/client/swagger/http/wallet/wallet_client.go +++ b/client/swagger/http/wallet/wallet_client.go @@ -69,7 +69,7 @@ type ClientService interface { ImportWallet imports a private key */ func (a *Client) ImportWallet(params *ImportWalletParams, opts ...ClientOption) (*ImportWalletOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewImportWalletParams() } @@ -88,17 +88,22 @@ func (a *Client) ImportWallet(params *ImportWalletParams, opts ...ClientOption) for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ImportWalletOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ImportWallet: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -107,7 +112,7 @@ func (a *Client) ImportWallet(params *ImportWalletParams, opts ...ClientOption) ListWallets lists all imported wallets */ func (a *Client) ListWallets(params *ListWalletsParams, opts ...ClientOption) (*ListWalletsOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewListWalletsParams() } @@ -126,17 +131,22 @@ func (a *Client) ListWallets(params *ListWalletsParams, opts ...ClientOption) (* for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ListWalletsOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. 
+ // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ListWallets: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -145,7 +155,7 @@ func (a *Client) ListWallets(params *ListWalletsParams, opts ...ClientOption) (* RemoveWallet removes a wallet */ func (a *Client) RemoveWallet(params *RemoveWalletParams, opts ...ClientOption) (*RemoveWalletNoContent, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewRemoveWalletParams() } @@ -164,17 +174,22 @@ func (a *Client) RemoveWallet(params *RemoveWalletParams, opts ...ClientOption) for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*RemoveWalletNoContent) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for RemoveWallet: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } diff --git a/client/swagger/http/wallet_association/attach_wallet_responses.go b/client/swagger/http/wallet_association/attach_wallet_responses.go index 434b37da..4cba5be4 100644 --- a/client/swagger/http/wallet_association/attach_wallet_responses.go +++ b/client/swagger/http/wallet_association/attach_wallet_responses.go @@ -7,6 +7,7 @@ package wallet_association import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type AttachWalletReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *AttachWalletReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *AttachWalletReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewAttachWalletOK() @@ -110,7 +111,7 @@ func (o *AttachWalletOK) readResponse(response runtime.ClientResponse, consumer o.Payload = new(models.ModelPreparation) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *AttachWalletBadRequest) readResponse(response runtime.ClientResponse, c o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *AttachWalletInternalServerError) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/wallet_association/detach_wallet_responses.go b/client/swagger/http/wallet_association/detach_wallet_responses.go index 4b5509d9..f3140e77 100644 --- a/client/swagger/http/wallet_association/detach_wallet_responses.go +++ b/client/swagger/http/wallet_association/detach_wallet_responses.go @@ -7,6 +7,7 @@ package wallet_association import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type DetachWalletReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *DetachWalletReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *DetachWalletReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewDetachWalletOK() @@ -110,7 +111,7 @@ func (o *DetachWalletOK) readResponse(response runtime.ClientResponse, consumer o.Payload = new(models.ModelPreparation) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -180,7 +181,7 @@ func (o *DetachWalletBadRequest) readResponse(response runtime.ClientResponse, c o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -250,7 +251,7 @@ func (o *DetachWalletInternalServerError) readResponse(response runtime.ClientRe o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/wallet_association/list_attached_wallets_responses.go b/client/swagger/http/wallet_association/list_attached_wallets_responses.go index 34f668a0..a8e4582e 100644 --- a/client/swagger/http/wallet_association/list_attached_wallets_responses.go +++ b/client/swagger/http/wallet_association/list_attached_wallets_responses.go @@ -7,6 +7,7 @@ package wallet_association import ( "encoding/json" + stderrors "errors" "fmt" "io" @@ -22,7 +23,7 @@ type ListAttachedWalletsReader struct { } // ReadResponse reads a server response into the received o. 
-func (o *ListAttachedWalletsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { +func (o *ListAttachedWalletsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { switch response.Code() { case 200: result := NewListAttachedWalletsOK() @@ -108,7 +109,7 @@ func (o *ListAttachedWalletsOK) GetPayload() []*models.ModelWallet { func (o *ListAttachedWalletsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -178,7 +179,7 @@ func (o *ListAttachedWalletsBadRequest) readResponse(response runtime.ClientResp o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } @@ -248,7 +249,7 @@ func (o *ListAttachedWalletsInternalServerError) readResponse(response runtime.C o.Payload = new(models.APIHTTPError) // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { return err } diff --git a/client/swagger/http/wallet_association/wallet_association_client.go b/client/swagger/http/wallet_association/wallet_association_client.go index c632ed46..544178e4 100644 --- a/client/swagger/http/wallet_association/wallet_association_client.go +++ b/client/swagger/http/wallet_association/wallet_association_client.go @@ -69,7 +69,7 @@ type ClientService interface { AttachWallet attaches a new wallet with a preparation */ func (a *Client) AttachWallet(params *AttachWalletParams, opts ...ClientOption) (*AttachWalletOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewAttachWalletParams() } @@ -88,17 +88,22 @@ func (a *Client) AttachWallet(params *AttachWalletParams, opts ...ClientOption) for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*AttachWalletOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for AttachWallet: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) panic(msg) } @@ -107,7 +112,7 @@ func (a *Client) AttachWallet(params *AttachWalletParams, opts ...ClientOption) DetachWallet detaches a new wallet from a preparation */ func (a *Client) DetachWallet(params *DetachWalletParams, opts ...ClientOption) (*DetachWalletOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewDetachWalletParams() } @@ -126,17 +131,22 @@ func (a *Client) DetachWallet(params *DetachWalletParams, opts ...ClientOption) for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*DetachWalletOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for DetachWallet: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } @@ -145,7 +155,7 @@ func (a *Client) DetachWallet(params *DetachWalletParams, opts ...ClientOption) ListAttachedWallets lists all wallets of a preparation */ func (a *Client) ListAttachedWallets(params *ListAttachedWalletsParams, opts ...ClientOption) (*ListAttachedWalletsOK, error) { - // TODO: Validate the params before sending + // NOTE: parameters are not validated before sending if params == nil { params = NewListAttachedWalletsParams() } @@ -164,17 +174,22 @@ func (a *Client) ListAttachedWallets(params *ListAttachedWalletsParams, opts ... for _, opt := range opts { opt(op) } - result, err := a.transport.Submit(op) if err != nil { return nil, err } + + // only one success response has to be checked success, ok := result.(*ListAttachedWalletsOK) if ok { return success, nil } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + + // unexpected success response. + + // no default response is defined. + // + // safeguard: normally, in the absence of a default response, unknown success responses return an error above: so this is a codegen issue msg := fmt.Sprintf("unexpected success response for ListAttachedWallets: API contract not enforced by server. Client expected to get an error, but got: %T", result) panic(msg) } diff --git a/client/swagger/models/dataprep_delete_piece_request.go b/client/swagger/models/dataprep_delete_piece_request.go new file mode 100644 index 00000000..fde8528e --- /dev/null +++ b/client/swagger/models/dataprep_delete_piece_request.go @@ -0,0 +1,53 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// DataprepDeletePieceRequest dataprep delete piece request +// +// swagger:model dataprep.DeletePieceRequest +type DataprepDeletePieceRequest struct { + + // Delete the physical CAR file from storage (default: true) + DeleteCar bool `json:"deleteCar,omitempty"` + + // Delete even if deals reference this piece + Force bool `json:"force,omitempty"` +} + +// Validate validates this dataprep delete piece request +func (m *DataprepDeletePieceRequest) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this dataprep delete piece request based on context it is used +func (m *DataprepDeletePieceRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DataprepDeletePieceRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DataprepDeletePieceRequest) UnmarshalBinary(b []byte) error { + var res DataprepDeletePieceRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/dataprep_dir_entry.go b/client/swagger/models/dataprep_dir_entry.go index 445ef06d..bb09e452 100644 --- a/client/swagger/models/dataprep_dir_entry.go +++ b/client/swagger/models/dataprep_dir_entry.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "strconv" "github.com/go-openapi/errors" @@ -58,11 +59,15 @@ func (m *DataprepDirEntry) validateFileVersions(formats strfmt.Registry) error { if m.FileVersions[i] != nil { if err := m.FileVersions[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("fileVersions" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("fileVersions" + "." + strconv.Itoa(i)) } + return err } } @@ -97,11 +102,15 @@ func (m *DataprepDirEntry) contextValidateFileVersions(ctx context.Context, form } if err := m.FileVersions[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("fileVersions" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("fileVersions" + "." + strconv.Itoa(i)) } + return err } } diff --git a/client/swagger/models/dataprep_explore_result.go b/client/swagger/models/dataprep_explore_result.go index 5f299029..2cc1898b 100644 --- a/client/swagger/models/dataprep_explore_result.go +++ b/client/swagger/models/dataprep_explore_result.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "strconv" "github.com/go-openapi/errors" @@ -55,11 +56,15 @@ func (m *DataprepExploreResult) validateSubEntries(formats strfmt.Registry) erro if m.SubEntries[i] != nil { if err := m.SubEntries[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("subEntries" + "." 
+ strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("subEntries" + "." + strconv.Itoa(i)) } + return err } } @@ -94,11 +99,15 @@ func (m *DataprepExploreResult) contextValidateSubEntries(ctx context.Context, f } if err := m.SubEntries[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("subEntries" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("subEntries" + "." + strconv.Itoa(i)) } + return err } } diff --git a/client/swagger/models/dataprep_piece_list.go b/client/swagger/models/dataprep_piece_list.go index c98597bb..5a9595f0 100644 --- a/client/swagger/models/dataprep_piece_list.go +++ b/client/swagger/models/dataprep_piece_list.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "strconv" "github.com/go-openapi/errors" @@ -62,11 +63,15 @@ func (m *DataprepPieceList) validatePieces(formats strfmt.Registry) error { if m.Pieces[i] != nil { if err := m.Pieces[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("pieces" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("pieces" + "." + strconv.Itoa(i)) } + return err } } @@ -83,11 +88,15 @@ func (m *DataprepPieceList) validateSource(formats strfmt.Registry) error { if m.Source != nil { if err := m.Source.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("source") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("source") } + return err } } @@ -124,11 +133,15 @@ func (m *DataprepPieceList) contextValidatePieces(ctx context.Context, formats s } if err := m.Pieces[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("pieces" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("pieces" + "." 
+ strconv.Itoa(i)) } + return err } } @@ -147,11 +160,15 @@ func (m *DataprepPieceList) contextValidateSource(ctx context.Context, formats s } if err := m.Source.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("source") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("source") } + return err } } diff --git a/client/swagger/models/deal_list_deal_request.go b/client/swagger/models/deal_list_deal_request.go index e68763f4..0bbad9ab 100644 --- a/client/swagger/models/deal_list_deal_request.go +++ b/client/swagger/models/deal_list_deal_request.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "strconv" "github.com/go-openapi/errors" @@ -57,11 +58,15 @@ func (m *DealListDealRequest) validateStates(formats strfmt.Registry) error { for i := 0; i < len(m.States); i++ { if err := m.States[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("states" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("states" + "." + strconv.Itoa(i)) } + return err } @@ -93,11 +98,15 @@ func (m *DealListDealRequest) contextValidateStates(ctx context.Context, formats } if err := m.States[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("states" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("states" + "." + strconv.Itoa(i)) } + return err } diff --git a/client/swagger/models/file_deals_for_file_range.go b/client/swagger/models/file_deals_for_file_range.go index ae2221ec..93b962b5 100644 --- a/client/swagger/models/file_deals_for_file_range.go +++ b/client/swagger/models/file_deals_for_file_range.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "strconv" "github.com/go-openapi/errors" @@ -56,11 +57,15 @@ func (m *FileDealsForFileRange) validateDeals(formats strfmt.Registry) error { if m.Deals[i] != nil { if err := m.Deals[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("deals" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("deals" + "." 
+ strconv.Itoa(i)) } + return err } } @@ -77,11 +82,15 @@ func (m *FileDealsForFileRange) validateFileRange(formats strfmt.Registry) error if m.FileRange != nil { if err := m.FileRange.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("fileRange") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("fileRange") } + return err } } @@ -118,11 +127,15 @@ func (m *FileDealsForFileRange) contextValidateDeals(ctx context.Context, format } if err := m.Deals[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("deals" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("deals" + "." + strconv.Itoa(i)) } + return err } } @@ -141,11 +154,15 @@ func (m *FileDealsForFileRange) contextValidateFileRange(ctx context.Context, fo } if err := m.FileRange.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("fileRange") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("fileRange") } + return err } } diff --git a/client/swagger/models/job_source_status.go b/client/swagger/models/job_source_status.go index 7f5fa467..ca6cfdb0 100644 --- a/client/swagger/models/job_source_status.go +++ b/client/swagger/models/job_source_status.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "strconv" "github.com/go-openapi/errors" @@ -69,11 +70,15 @@ func (m *JobSourceStatus) validateJobs(formats strfmt.Registry) error { if m.Jobs[i] != nil { if err := m.Jobs[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("jobs" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("jobs" + "." + strconv.Itoa(i)) } + return err } } @@ -95,11 +100,15 @@ func (m *JobSourceStatus) validateOutput(formats strfmt.Registry) error { if m.Output[i] != nil { if err := m.Output[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("output" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("output" + "." 
+ strconv.Itoa(i)) } + return err } } @@ -116,11 +125,15 @@ func (m *JobSourceStatus) validateSource(formats strfmt.Registry) error { if m.Source != nil { if err := m.Source.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("source") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("source") } + return err } } @@ -161,11 +174,15 @@ func (m *JobSourceStatus) contextValidateJobs(ctx context.Context, formats strfm } if err := m.Jobs[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("jobs" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("jobs" + "." + strconv.Itoa(i)) } + return err } } @@ -186,11 +203,15 @@ func (m *JobSourceStatus) contextValidateOutput(ctx context.Context, formats str } if err := m.Output[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("output" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("output" + "." + strconv.Itoa(i)) } + return err } } @@ -209,11 +230,15 @@ func (m *JobSourceStatus) contextValidateSource(ctx context.Context, formats str } if err := m.Source.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("source") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("source") } + return err } } diff --git a/client/swagger/models/model_car.go b/client/swagger/models/model_car.go index a790ea9f..59d9f1b5 100644 --- a/client/swagger/models/model_car.go +++ b/client/swagger/models/model_car.go @@ -32,6 +32,9 @@ type ModelCar struct { // job Id JobID int64 `json:"jobId,omitempty"` + // MinPieceSizePadding tracks virtual padding for inline mode only. Inline: stores padding amount, PieceReader serves zeros virtually. Non-inline: always 0, literal zeros are written to CAR file for Curio TreeD compatibility. 
+ MinPieceSizePadding int64 `json:"minPieceSizePadding,omitempty"` + // num of files NumOfFiles int64 `json:"numOfFiles,omitempty"` @@ -44,7 +47,7 @@ type ModelCar struct { // PieceType indicates whether this is a data piece or DAG piece PieceType string `json:"pieceType,omitempty"` - // Association + // Association - SET NULL for fast prep deletion, async cleanup PreparationID int64 `json:"preparationId,omitempty"` // root cid diff --git a/client/swagger/models/model_deal.go b/client/swagger/models/model_deal.go index eb73cbea..252591e9 100644 --- a/client/swagger/models/model_deal.go +++ b/client/swagger/models/model_deal.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" @@ -96,11 +97,15 @@ func (m *ModelDeal) validateState(formats strfmt.Registry) error { } if err := m.State.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("state") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("state") } + return err } @@ -128,11 +133,15 @@ func (m *ModelDeal) contextValidateState(ctx context.Context, formats strfmt.Reg } if err := m.State.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("state") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("state") } + return err } diff --git a/client/swagger/models/model_deal_state.go b/client/swagger/models/model_deal_state.go index c7d44ea9..15784a35 100644 --- a/client/swagger/models/model_deal_state.go +++ b/client/swagger/models/model_deal_state.go @@ -56,7 +56,7 @@ const ( ) // for schema -var modelDealStateEnum []interface{} +var modelDealStateEnum []any func init() { var res []ModelDealState diff --git a/client/swagger/models/model_file.go b/client/swagger/models/model_file.go index 903bdfe2..926ced7d 100644 --- a/client/swagger/models/model_file.go +++ b/client/swagger/models/model_file.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "strconv" "github.com/go-openapi/errors" @@ -19,7 +20,7 @@ import ( // swagger:model model.File type ModelFile struct { - // Associations + // Associations - AttachmentID SET NULL for fast prep deletion, async cleanup AttachmentID int64 `json:"attachmentId,omitempty"` // CID is the CID of the file. @@ -73,11 +74,15 @@ func (m *ModelFile) validateFileRanges(formats strfmt.Registry) error { if m.FileRanges[i] != nil { if err := m.FileRanges[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("fileRanges" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("fileRanges" + "." + strconv.Itoa(i)) } + return err } } @@ -112,11 +117,15 @@ func (m *ModelFile) contextValidateFileRanges(ctx context.Context, formats strfm } if err := m.FileRanges[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("fileRanges" + "." 
+ strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("fileRanges" + "." + strconv.Itoa(i)) } + return err } } diff --git a/client/swagger/models/model_job.go b/client/swagger/models/model_job.go index 7149a994..4b2b247e 100644 --- a/client/swagger/models/model_job.go +++ b/client/swagger/models/model_job.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" @@ -36,7 +37,7 @@ type ModelJob struct { // type Type ModelJobType `json:"type,omitempty"` - // Associations + // Associations - AttachmentID SET NULL for fast prep deletion, async cleanup WorkerID string `json:"workerId,omitempty"` } @@ -64,11 +65,15 @@ func (m *ModelJob) validateState(formats strfmt.Registry) error { } if err := m.State.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("state") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("state") } + return err } @@ -81,11 +86,15 @@ func (m *ModelJob) validateType(formats strfmt.Registry) error { } if err := m.Type.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("type") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("type") } + return err } @@ -117,11 +126,15 @@ func (m *ModelJob) contextValidateState(ctx context.Context, formats strfmt.Regi } if err := m.State.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("state") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("state") } + return err } @@ -135,11 +148,15 @@ func (m *ModelJob) contextValidateType(ctx context.Context, formats strfmt.Regis } if err := m.Type.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("type") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("type") } + return err } diff --git a/client/swagger/models/model_job_state.go b/client/swagger/models/model_job_state.go index 6b8fc89f..fc813d58 100644 --- a/client/swagger/models/model_job_state.go +++ b/client/swagger/models/model_job_state.go @@ -50,7 +50,7 @@ const ( ) // for schema -var modelJobStateEnum []interface{} +var modelJobStateEnum []any func init() { var res []ModelJobState diff --git a/client/swagger/models/model_job_type.go b/client/swagger/models/model_job_type.go index 295d3bbc..74a50ea0 100644 --- a/client/swagger/models/model_job_type.go +++ b/client/swagger/models/model_job_type.go @@ -41,7 +41,7 @@ const ( ) // for schema -var modelJobTypeEnum []interface{} +var modelJobTypeEnum []any func init() { var res []ModelJobType diff --git a/client/swagger/models/model_preparation.go b/client/swagger/models/model_preparation.go index 16cc6d7b..4a0ed290 100644 --- 
a/client/swagger/models/model_preparation.go +++ b/client/swagger/models/model_preparation.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "strconv" "github.com/go-openapi/errors" @@ -86,11 +87,15 @@ func (m *ModelPreparation) validateOutputStorages(formats strfmt.Registry) error if m.OutputStorages[i] != nil { if err := m.OutputStorages[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("outputStorages" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("outputStorages" + "." + strconv.Itoa(i)) } + return err } } @@ -112,11 +117,15 @@ func (m *ModelPreparation) validateSourceStorages(formats strfmt.Registry) error if m.SourceStorages[i] != nil { if err := m.SourceStorages[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("sourceStorages" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("sourceStorages" + "." + strconv.Itoa(i)) } + return err } } @@ -155,11 +164,15 @@ func (m *ModelPreparation) contextValidateOutputStorages(ctx context.Context, fo } if err := m.OutputStorages[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("outputStorages" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("outputStorages" + "." + strconv.Itoa(i)) } + return err } } @@ -180,11 +193,15 @@ func (m *ModelPreparation) contextValidateSourceStorages(ctx context.Context, fo } if err := m.SourceStorages[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("sourceStorages" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("sourceStorages" + "." 
+ strconv.Itoa(i)) } + return err } } diff --git a/client/swagger/models/model_schedule.go b/client/swagger/models/model_schedule.go index 2451a3f0..96038ee3 100644 --- a/client/swagger/models/model_schedule.go +++ b/client/swagger/models/model_schedule.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" @@ -128,11 +129,15 @@ func (m *ModelSchedule) validateHTTPHeaders(formats strfmt.Registry) error { if m.HTTPHeaders != nil { if err := m.HTTPHeaders.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("httpHeaders") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("httpHeaders") } + return err } } @@ -146,11 +151,15 @@ func (m *ModelSchedule) validateState(formats strfmt.Registry) error { } if err := m.State.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("state") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("state") } + return err } @@ -182,11 +191,15 @@ func (m *ModelSchedule) contextValidateHTTPHeaders(ctx context.Context, formats } if err := m.HTTPHeaders.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("httpHeaders") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("httpHeaders") } + return err } @@ -200,11 +213,15 @@ func (m *ModelSchedule) contextValidateState(ctx context.Context, formats strfmt } if err := m.State.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("state") - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("state") } + return err } diff --git a/client/swagger/models/model_schedule_state.go b/client/swagger/models/model_schedule_state.go index a6d563dc..33501e86 100644 --- a/client/swagger/models/model_schedule_state.go +++ b/client/swagger/models/model_schedule_state.go @@ -44,7 +44,7 @@ const ( ) // for schema -var modelScheduleStateEnum []interface{} +var modelScheduleStateEnum []any func init() { var res []ModelScheduleState diff --git a/client/swagger/models/model_storage.go b/client/swagger/models/model_storage.go index 27c31f12..eedb371e 100644 --- a/client/swagger/models/model_storage.go +++ b/client/swagger/models/model_storage.go @@ -7,6 +7,7 @@ package models import ( "context" + stderrors "errors" "strconv" "github.com/go-openapi/errors" @@ -108,11 +109,15 @@ func (m *ModelStorage) validatePreparationsAsOutput(formats strfmt.Registry) err if m.PreparationsAsOutput[i] != nil { if err := m.PreparationsAsOutput[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("preparationsAsOutput" + "." 
+ strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("preparationsAsOutput" + "." + strconv.Itoa(i)) } + return err } } @@ -134,11 +139,15 @@ func (m *ModelStorage) validatePreparationsAsSource(formats strfmt.Registry) err if m.PreparationsAsSource[i] != nil { if err := m.PreparationsAsSource[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("preparationsAsSource" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("preparationsAsSource" + "." + strconv.Itoa(i)) } + return err } } @@ -195,11 +204,15 @@ func (m *ModelStorage) contextValidatePreparationsAsOutput(ctx context.Context, } if err := m.PreparationsAsOutput[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("preparationsAsOutput" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("preparationsAsOutput" + "." + strconv.Itoa(i)) } + return err } } @@ -220,11 +233,15 @@ func (m *ModelStorage) contextValidatePreparationsAsSource(ctx context.Context, } if err := m.PreparationsAsSource[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { return ve.ValidateName("preparationsAsSource" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { return ce.ValidateName("preparationsAsSource" + "." + strconv.Itoa(i)) } + return err } } diff --git a/client/swagger/models/storage_acd_config.go b/client/swagger/models/storage_acd_config.go deleted file mode 100644 index 0adb90b3..00000000 --- a/client/swagger/models/storage_acd_config.go +++ /dev/null @@ -1,74 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// StorageAcdConfig storage acd config -// -// swagger:model storage.acdConfig -type StorageAcdConfig struct { - - // Auth server URL. - AuthURL string `json:"authUrl,omitempty"` - - // Checkpoint for internal polling (debug). - Checkpoint string `json:"checkpoint,omitempty"` - - // OAuth Client Id. - ClientID string `json:"clientId,omitempty"` - - // OAuth Client Secret. - ClientSecret string `json:"clientSecret,omitempty"` - - // The encoding for the backend. - Encoding *string `json:"encoding,omitempty"` - - // Files >= this size will be downloaded via their tempLink. - TemplinkThreshold *string `json:"templinkThreshold,omitempty"` - - // OAuth Access Token as a JSON blob. - Token string `json:"token,omitempty"` - - // Token server url. - TokenURL string `json:"tokenUrl,omitempty"` - - // Additional time per GiB to wait after a failed complete upload to see if it appears. 
- UploadWaitPerGb *string `json:"uploadWaitPerGb,omitempty"` -} - -// Validate validates this storage acd config -func (m *StorageAcdConfig) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this storage acd config based on context it is used -func (m *StorageAcdConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *StorageAcdConfig) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *StorageAcdConfig) UnmarshalBinary(b []byte) error { - var res StorageAcdConfig - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/client/swagger/models/storage_azureblob_config.go b/client/swagger/models/storage_azureblob_config.go index 88fc284f..72f58db9 100644 --- a/client/swagger/models/storage_azureblob_config.go +++ b/client/swagger/models/storage_azureblob_config.go @@ -17,7 +17,7 @@ import ( // swagger:model storage.azureblobConfig type StorageAzureblobConfig struct { - // Access tier of blob: hot, cool or archive. + // Access tier of blob: hot, cool, cold or archive. AccessTier string `json:"accessTier,omitempty"` // Azure Storage Account Name. @@ -44,6 +44,15 @@ type StorageAzureblobConfig struct { // Send the certificate chain when using certificate auth. ClientSendCertificateChain *bool `json:"clientSendCertificateChain,omitempty"` + // Set to specify how to deal with snapshots on blob deletion. + DeleteSnapshots string `json:"deleteSnapshots,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -62,10 +71,10 @@ type StorageAzureblobConfig struct { // Size of blob list. ListChunk *int64 `json:"listChunk,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Object ID of the user-assigned MSI to use, if any. diff --git a/client/swagger/models/storage_b2_config.go b/client/swagger/models/storage_b2_config.go index a33c11f7..8ae0acb6 100644 --- a/client/swagger/models/storage_b2_config.go +++ b/client/swagger/models/storage_b2_config.go @@ -26,10 +26,13 @@ type StorageB2Config struct { // Cutoff for switching to multipart copy. CopyCutoff *string `json:"copyCutoff,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Disable checksums for large (> upload cutoff) files. DisableChecksum *bool `json:"disableChecksum,omitempty"` - // Time before the authorization token will expire in s or suffix ms|s|m|h|d. + // Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. DownloadAuthDuration *string `json:"downloadAuthDuration,omitempty"` // Custom endpoint for downloads. @@ -47,15 +50,21 @@ type StorageB2Config struct { // Application Key. 
Key string `json:"key,omitempty"` - // How often internal memory buffer pools will be flushed. + // Set the number of days deleted files should be kept when creating a bucket. + Lifecycle int64 `json:"lifecycle,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // A flag string for X-Bz-Test-Mode header for debugging. TestMode string `json:"testMode,omitempty"` + // Concurrency for multipart uploads. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` diff --git a/client/swagger/models/storage_box_config.go b/client/swagger/models/storage_box_config.go index 50025c3e..32d445fe 100644 --- a/client/swagger/models/storage_box_config.go +++ b/client/swagger/models/storage_box_config.go @@ -39,9 +39,15 @@ type StorageBoxConfig struct { // Max number of times to try committing a multipart file. CommitRetries *int64 `json:"commitRetries,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` + // Impersonate this user ID when using a service account. + Impersonate string `json:"impersonate,omitempty"` + // Size of listing chunk 1-1000. ListChunk *int64 `json:"listChunk,omitempty"` diff --git a/client/swagger/models/storage_create_oos_workload_identity_auth_storage_request.go b/client/swagger/models/storage_create_oos_workload_identity_auth_storage_request.go new file mode 100644 index 00000000..9b30705a --- /dev/null +++ b/client/swagger/models/storage_create_oos_workload_identity_auth_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateOosWorkloadIdentityAuthStorageRequest storage create oos workload identity auth storage request +// +// swagger:model storage.createOosWorkload_identity_authStorageRequest +type StorageCreateOosWorkloadIdentityAuthStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageOosWorkloadIdentityAuthConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create oos workload identity auth storage request +func (m *StorageCreateOosWorkloadIdentityAuthStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateOosWorkloadIdentityAuthStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateOosWorkloadIdentityAuthStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create oos workload identity auth storage request based on the context it is used +func (m *StorageCreateOosWorkloadIdentityAuthStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateOosWorkloadIdentityAuthStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateOosWorkloadIdentityAuthStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateOosWorkloadIdentityAuthStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateOosWorkloadIdentityAuthStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateOosWorkloadIdentityAuthStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_g_c_s_storage_request.go b/client/swagger/models/storage_create_s3_g_c_s_storage_request.go new file mode 100644 index 00000000..23cf0f24 --- /dev/null +++ b/client/swagger/models/storage_create_s3_g_c_s_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3GCSStorageRequest storage create s3 g c s storage request +// +// swagger:model storage.createS3GCSStorageRequest +type StorageCreateS3GCSStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3GCSConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 g c s storage request +func (m *StorageCreateS3GCSStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3GCSStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3GCSStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 g c s storage request based on the context it is used +func (m *StorageCreateS3GCSStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3GCSStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3GCSStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3GCSStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3GCSStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3GCSStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_leviia_storage_request.go b/client/swagger/models/storage_create_s3_leviia_storage_request.go new file mode 100644 index 00000000..0944fb80 --- /dev/null +++ b/client/swagger/models/storage_create_s3_leviia_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3LeviiaStorageRequest storage create s3 leviia storage request +// +// swagger:model storage.createS3LeviiaStorageRequest +type StorageCreateS3LeviiaStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3LeviiaConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 leviia storage request +func (m *StorageCreateS3LeviiaStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3LeviiaStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3LeviiaStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 leviia storage request based on the context it is used +func (m *StorageCreateS3LeviiaStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3LeviiaStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3LeviiaStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3LeviiaStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3LeviiaStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3LeviiaStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_linode_storage_request.go b/client/swagger/models/storage_create_s3_linode_storage_request.go new file mode 100644 index 00000000..077ce1d3 --- /dev/null +++ b/client/swagger/models/storage_create_s3_linode_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3LinodeStorageRequest storage create s3 linode storage request +// +// swagger:model storage.createS3LinodeStorageRequest +type StorageCreateS3LinodeStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3LinodeConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 linode storage request +func (m *StorageCreateS3LinodeStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3LinodeStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3LinodeStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 linode storage request based on the context it is used +func (m *StorageCreateS3LinodeStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3LinodeStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3LinodeStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3LinodeStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3LinodeStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3LinodeStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_magalu_storage_request.go b/client/swagger/models/storage_create_s3_magalu_storage_request.go new file mode 100644 index 00000000..eb3e6afa --- /dev/null +++ b/client/swagger/models/storage_create_s3_magalu_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3MagaluStorageRequest storage create s3 magalu storage request +// +// swagger:model storage.createS3MagaluStorageRequest +type StorageCreateS3MagaluStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3MagaluConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 magalu storage request +func (m *StorageCreateS3MagaluStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StorageCreateS3MagaluStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3MagaluStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 magalu storage request based on the context it is used +func (m *StorageCreateS3MagaluStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3MagaluStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3MagaluStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3MagaluStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3MagaluStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3MagaluStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_acd_storage_request.go b/client/swagger/models/storage_create_s3_petabox_storage_request.go similarity index 63% rename from client/swagger/models/storage_create_acd_storage_request.go rename to client/swagger/models/storage_create_s3_petabox_storage_request.go index 308895be..8be850c7 100644 --- a/client/swagger/models/storage_create_acd_storage_request.go +++ b/client/swagger/models/storage_create_s3_petabox_storage_request.go @@ -13,10 +13,10 @@ import ( "github.com/go-openapi/swag" ) -// StorageCreateAcdStorageRequest storage create acd storage request +// StorageCreateS3PetaboxStorageRequest storage create s3 petabox storage request // -// swagger:model storage.createAcdStorageRequest -type StorageCreateAcdStorageRequest struct { +// swagger:model storage.createS3PetaboxStorageRequest +type StorageCreateS3PetaboxStorageRequest struct { // config for underlying HTTP client ClientConfig struct { @@ -25,7 +25,7 @@ type StorageCreateAcdStorageRequest struct { // config for the storage Config struct { - StorageAcdConfig + StorageS3PetaboxConfig } `json:"config,omitempty"` // Name of the storage, must be unique @@ -36,8 +36,8 @@ type StorageCreateAcdStorageRequest struct { Path string `json:"path,omitempty"` } -// Validate validates this storage create acd storage request -func (m *StorageCreateAcdStorageRequest) Validate(formats strfmt.Registry) error { +// Validate validates this storage create s3 petabox storage request +func (m *StorageCreateS3PetaboxStorageRequest) Validate(formats strfmt.Registry) error { var res []error if err := m.validateClientConfig(formats); err != nil { @@ -54,7 +54,7 @@ func (m *StorageCreateAcdStorageRequest) Validate(formats strfmt.Registry) error return nil } -func (m *StorageCreateAcdStorageRequest) validateClientConfig(formats strfmt.Registry) error { +func (m 
*StorageCreateS3PetaboxStorageRequest) validateClientConfig(formats strfmt.Registry) error { if swag.IsZero(m.ClientConfig) { // not required return nil } @@ -62,7 +62,7 @@ func (m *StorageCreateAcdStorageRequest) validateClientConfig(formats strfmt.Reg return nil } -func (m *StorageCreateAcdStorageRequest) validateConfig(formats strfmt.Registry) error { +func (m *StorageCreateS3PetaboxStorageRequest) validateConfig(formats strfmt.Registry) error { if swag.IsZero(m.Config) { // not required return nil } @@ -70,8 +70,8 @@ func (m *StorageCreateAcdStorageRequest) validateConfig(formats strfmt.Registry) return nil } -// ContextValidate validate this storage create acd storage request based on the context it is used -func (m *StorageCreateAcdStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { +// ContextValidate validate this storage create s3 petabox storage request based on the context it is used +func (m *StorageCreateS3PetaboxStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error if err := m.contextValidateClientConfig(ctx, formats); err != nil { @@ -88,18 +88,18 @@ func (m *StorageCreateAcdStorageRequest) ContextValidate(ctx context.Context, fo return nil } -func (m *StorageCreateAcdStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { +func (m *StorageCreateS3PetaboxStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { return nil } -func (m *StorageCreateAcdStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { +func (m *StorageCreateS3PetaboxStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { return nil } // MarshalBinary interface implementation -func (m *StorageCreateAcdStorageRequest) MarshalBinary() ([]byte, error) { +func (m *StorageCreateS3PetaboxStorageRequest) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -107,8 +107,8 @@ func (m *StorageCreateAcdStorageRequest) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *StorageCreateAcdStorageRequest) UnmarshalBinary(b []byte) error { - var res StorageCreateAcdStorageRequest +func (m *StorageCreateS3PetaboxStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3PetaboxStorageRequest if err := swag.ReadJSON(b, &res); err != nil { return err } diff --git a/client/swagger/models/storage_create_s3_rclone_storage_request.go b/client/swagger/models/storage_create_s3_rclone_storage_request.go new file mode 100644 index 00000000..92795622 --- /dev/null +++ b/client/swagger/models/storage_create_s3_rclone_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3RcloneStorageRequest storage create s3 rclone storage request +// +// swagger:model storage.createS3RcloneStorageRequest +type StorageCreateS3RcloneStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3RcloneConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 rclone storage request +func (m *StorageCreateS3RcloneStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3RcloneStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3RcloneStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 rclone storage request based on the context it is used +func (m *StorageCreateS3RcloneStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3RcloneStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3RcloneStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3RcloneStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3RcloneStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3RcloneStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_create_s3_synology_storage_request.go b/client/swagger/models/storage_create_s3_synology_storage_request.go new file mode 100644 index 00000000..3b0f1d0c --- /dev/null +++ b/client/swagger/models/storage_create_s3_synology_storage_request.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageCreateS3SynologyStorageRequest storage create s3 synology storage request +// +// swagger:model storage.createS3SynologyStorageRequest +type StorageCreateS3SynologyStorageRequest struct { + + // config for underlying HTTP client + ClientConfig struct { + ModelClientConfig + } `json:"clientConfig,omitempty"` + + // config for the storage + Config struct { + StorageS3SynologyConfig + } `json:"config,omitempty"` + + // Name of the storage, must be unique + // Example: my-storage + Name string `json:"name,omitempty"` + + // Path of the storage + Path string `json:"path,omitempty"` +} + +// Validate validates this storage create s3 synology storage request +func (m *StorageCreateS3SynologyStorageRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClientConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3SynologyStorageRequest) validateClientConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ClientConfig) { // not required + return nil + } + + return nil +} + +func (m *StorageCreateS3SynologyStorageRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + return nil +} + +// ContextValidate validate this storage create s3 synology storage request based on the context it is used +func (m *StorageCreateS3SynologyStorageRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClientConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StorageCreateS3SynologyStorageRequest) contextValidateClientConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +func (m *StorageCreateS3SynologyStorageRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *StorageCreateS3SynologyStorageRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageCreateS3SynologyStorageRequest) UnmarshalBinary(b []byte) error { + var res StorageCreateS3SynologyStorageRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_drive_config.go b/client/swagger/models/storage_drive_config.go index f13fb5d2..a7c9ad1f 100644 --- a/client/swagger/models/storage_drive_config.go +++ b/client/swagger/models/storage_drive_config.go @@ -44,15 +44,25 @@ type StorageDriveConfig struct { // Server side copy contents of shortcuts instead of the shortcut. CopyShortcutContent *bool `json:"copyShortcutContent,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Disable drive using http2. DisableHttp2 *bool `json:"disableHttp2,omitempty"` // The encoding for the backend. 
Encoding *string `json:"encoding,omitempty"` + // Get IAM credentials from runtime (environment variables or instance meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + // Comma separated list of preferred formats for downloading Google docs. ExportFormats *string `json:"exportFormats,omitempty"` + // Work around a bug in Google Drive listing. + FastListBugFix *bool `json:"fastListBugFix,omitempty"` + // Deprecated: See export_formats. Formats string `json:"formats,omitempty"` @@ -68,6 +78,18 @@ type StorageDriveConfig struct { // Size of listing chunk 100-1000, 0 to disable. ListChunk *int64 `json:"listChunk,omitempty"` + // Control whether labels should be read or written in metadata. + // Example: off + MetadataLabels *string `json:"metadataLabels,omitempty"` + + // Control whether owner should be read or written in metadata. + // Example: off + MetadataOwner *string `json:"metadataOwner,omitempty"` + + // Control whether permissions should be read or written in metadata. + // Example: off + MetadataPermissions *string `json:"metadataPermissions,omitempty"` + // Number of API calls to allow without sleeping. PacerBurst *int64 `json:"pacerBurst,omitempty"` @@ -80,11 +102,11 @@ type StorageDriveConfig struct { // ID of the root folder. RootFolderID string `json:"rootFolderId,omitempty"` - // Scope that rclone should use when requesting access from drive. + // Comma separated list of scopes that rclone should use when requesting access from drive. // Example: drive Scope string `json:"scope,omitempty"` - // Allow server-side operations (e.g. copy) to work across different drive configs. + // Deprecated: use --server-side-across-configs instead. ServerSideAcrossConfigs *bool `json:"serverSideAcrossConfigs,omitempty"` // Service Account Credentials JSON blob. @@ -96,10 +118,13 @@ type StorageDriveConfig struct { // Only show files that are shared with me. SharedWithMe *bool `json:"sharedWithMe,omitempty"` + // Show all Google Docs including non-exportable ones in listings. + ShowAllGdocs *bool `json:"showAllGdocs,omitempty"` + // Show sizes as storage quota usage, not actual size. SizeAsQuota *bool `json:"sizeAsQuota,omitempty"` - // Skip MD5 checksum on Google photos and videos only. + // Skip checksums on Google photos and videos only. SkipChecksumGphotos *bool `json:"skipChecksumGphotos,omitempty"` // If set skip dangling shortcut files. diff --git a/client/swagger/models/storage_dropbox_config.go b/client/swagger/models/storage_dropbox_config.go index 6c7950f7..637ec7dc 100644 --- a/client/swagger/models/storage_dropbox_config.go +++ b/client/swagger/models/storage_dropbox_config.go @@ -41,12 +41,21 @@ type StorageDropboxConfig struct { // OAuth Client Secret. ClientSecret string `json:"clientSecret,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` // Impersonate this user when using a business account. Impersonate string `json:"impersonate,omitempty"` + // Minimum time to sleep between API calls. + PacerMinSleep *string `json:"pacerMinSleep,omitempty"` + + // Specify a different Dropbox namespace ID to use as the root for all paths. + RootNamespace string `json:"rootNamespace,omitempty"` + // Instructs rclone to work on individual shared files. 
SharedFiles *bool `json:"sharedFiles,omitempty"` diff --git a/client/swagger/models/storage_fichier_config.go b/client/swagger/models/storage_fichier_config.go index 223cfca6..94406395 100644 --- a/client/swagger/models/storage_fichier_config.go +++ b/client/swagger/models/storage_fichier_config.go @@ -20,6 +20,12 @@ type StorageFichierConfig struct { // Your API Key, get it from https://1fichier.com/console/params.pl. APIKey string `json:"apiKey,omitempty"` + // Set if you wish to use CDN download links. + Cdn *bool `json:"cdn,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_filefabric_config.go b/client/swagger/models/storage_filefabric_config.go index 95cf1e0b..c9294a98 100644 --- a/client/swagger/models/storage_filefabric_config.go +++ b/client/swagger/models/storage_filefabric_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.filefabricConfig type StorageFilefabricConfig struct { + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_ftp_config.go b/client/swagger/models/storage_ftp_config.go index 8c797768..a5de21b2 100644 --- a/client/swagger/models/storage_ftp_config.go +++ b/client/swagger/models/storage_ftp_config.go @@ -26,6 +26,9 @@ type StorageFtpConfig struct { // Maximum number of FTP simultaneous connections, 0 for unlimited. Concurrency int64 `json:"concurrency,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Disable using EPSV even if server advertises support. DisableEpsv *bool `json:"disableEpsv,omitempty"` @@ -66,6 +69,9 @@ type StorageFtpConfig struct { // Maximum time to wait for data connection closing status. ShutTimeout *string `json:"shutTimeout,omitempty"` + // Socks 5 proxy host. + SocksProxy string `json:"socksProxy,omitempty"` + // Use Implicit FTPS (FTP over TLS). TLS *bool `json:"tls,omitempty"` diff --git a/client/swagger/models/storage_gcs_config.go b/client/swagger/models/storage_gcs_config.go index bcd97496..3dfafd06 100644 --- a/client/swagger/models/storage_gcs_config.go +++ b/client/swagger/models/storage_gcs_config.go @@ -39,6 +39,12 @@ type StorageGcsConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` @@ -76,6 +82,9 @@ type StorageGcsConfig struct { // Token server url. TokenURL string `json:"tokenUrl,omitempty"` + + // User project. + UserProject string `json:"userProject,omitempty"` } // Validate validates this storage gcs config diff --git a/client/swagger/models/storage_gphotos_config.go b/client/swagger/models/storage_gphotos_config.go index 39ad8f1f..b1a85bbd 100644 --- a/client/swagger/models/storage_gphotos_config.go +++ b/client/swagger/models/storage_gphotos_config.go @@ -20,12 +20,27 @@ type StorageGphotosConfig struct { // Auth server URL. 
AuthURL string `json:"authUrl,omitempty"` + // Max time to wait for a batch to finish committing + BatchCommitTimeout *string `json:"batchCommitTimeout,omitempty"` + + // Upload file batching sync|async|off. + BatchMode *string `json:"batchMode,omitempty"` + + // Max number of files in upload batch. + BatchSize int64 `json:"batchSize,omitempty"` + + // Max time to allow an idle upload batch before uploading. + BatchTimeout *string `json:"batchTimeout,omitempty"` + // OAuth Client Id. ClientID string `json:"clientId,omitempty"` // OAuth Client Secret. ClientSecret string `json:"clientSecret,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_hdfs_config.go b/client/swagger/models/storage_hdfs_config.go index 250de86c..90552b34 100644 --- a/client/swagger/models/storage_hdfs_config.go +++ b/client/swagger/models/storage_hdfs_config.go @@ -21,10 +21,13 @@ type StorageHdfsConfig struct { // Example: privacy DataTransferProtection string `json:"dataTransferProtection,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` - // Hadoop name node and port. + // Hadoop name nodes and ports. Namenode string `json:"namenode,omitempty"` // Kerberos service principal name for the namenode. diff --git a/client/swagger/models/storage_hidrive_config.go b/client/swagger/models/storage_hidrive_config.go index fe281863..3d9a8157 100644 --- a/client/swagger/models/storage_hidrive_config.go +++ b/client/swagger/models/storage_hidrive_config.go @@ -29,6 +29,9 @@ type StorageHidriveConfig struct { // OAuth Client Secret. ClientSecret string `json:"clientSecret,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Do not fetch number of objects in directories unless it is absolutely necessary. DisableFetchingMemberCount *bool `json:"disableFetchingMemberCount,omitempty"` diff --git a/client/swagger/models/storage_http_config.go b/client/swagger/models/storage_http_config.go index ef198027..fd3c54bb 100644 --- a/client/swagger/models/storage_http_config.go +++ b/client/swagger/models/storage_http_config.go @@ -17,9 +17,15 @@ import ( // swagger:model storage.httpConfig type StorageHTTPConfig struct { + // Description of the remote. + Description string `json:"description,omitempty"` + // Set HTTP headers for all transactions. Headers string `json:"headers,omitempty"` + // Do not escape URL metacharacters in path names. + NoEscape *bool `json:"noEscape,omitempty"` + // Don't use HEAD requests. NoHead *bool `json:"noHead,omitempty"` diff --git a/client/swagger/models/storage_internetarchive_config.go b/client/swagger/models/storage_internetarchive_config.go index 00453ded..38db46d9 100644 --- a/client/swagger/models/storage_internetarchive_config.go +++ b/client/swagger/models/storage_internetarchive_config.go @@ -20,6 +20,9 @@ type StorageInternetarchiveConfig struct { // IAS3 Access Key. AccessKeyID string `json:"accessKeyId,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Don't ask the server to test against MD5 checksum calculated by rclone. 
DisableChecksum *bool `json:"disableChecksum,omitempty"` diff --git a/client/swagger/models/storage_jottacloud_config.go b/client/swagger/models/storage_jottacloud_config.go index 97654c24..4ecc2644 100644 --- a/client/swagger/models/storage_jottacloud_config.go +++ b/client/swagger/models/storage_jottacloud_config.go @@ -17,6 +17,18 @@ import ( // swagger:model storage.jottacloudConfig type StorageJottacloudConfig struct { + // Auth server URL. + AuthURL string `json:"authUrl,omitempty"` + + // OAuth Client Id. + ClientID string `json:"clientId,omitempty"` + + // OAuth Client Secret. + ClientSecret string `json:"clientSecret,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` @@ -29,6 +41,12 @@ type StorageJottacloudConfig struct { // Avoid server side versioning by deleting files and recreating files instead of overwriting them. NoVersions *bool `json:"noVersions,omitempty"` + // OAuth Access Token as a JSON blob. + Token string `json:"token,omitempty"` + + // Token server url. + TokenURL string `json:"tokenUrl,omitempty"` + // Only show files that are in the trash. TrashedOnly *bool `json:"trashedOnly,omitempty"` diff --git a/client/swagger/models/storage_koofr_digistorage_config.go b/client/swagger/models/storage_koofr_digistorage_config.go index 94282672..0b8fcb4a 100644 --- a/client/swagger/models/storage_koofr_digistorage_config.go +++ b/client/swagger/models/storage_koofr_digistorage_config.go @@ -17,13 +17,16 @@ import ( // swagger:model storage.koofrDigistorageConfig type StorageKoofrDigistorageConfig struct { + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` // Mount ID of the mount to use. Mountid string `json:"mountid,omitempty"` - // Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password). + // Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password. Password string `json:"password,omitempty"` // Does the backend support setting modification time. diff --git a/client/swagger/models/storage_koofr_koofr_config.go b/client/swagger/models/storage_koofr_koofr_config.go index ffac2bff..8a88e68c 100644 --- a/client/swagger/models/storage_koofr_koofr_config.go +++ b/client/swagger/models/storage_koofr_koofr_config.go @@ -17,13 +17,16 @@ import ( // swagger:model storage.koofrKoofrConfig type StorageKoofrKoofrConfig struct { + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` // Mount ID of the mount to use. Mountid string `json:"mountid,omitempty"` - // Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password). + // Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password. Password string `json:"password,omitempty"` // Does the backend support setting modification time. diff --git a/client/swagger/models/storage_koofr_other_config.go b/client/swagger/models/storage_koofr_other_config.go index f5794b69..9fd91267 100644 --- a/client/swagger/models/storage_koofr_other_config.go +++ b/client/swagger/models/storage_koofr_other_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.koofrOtherConfig type StorageKoofrOtherConfig struct { + // Description of the remote. 
+ Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_local_config.go b/client/swagger/models/storage_local_config.go index 909ed6a7..722e8213 100644 --- a/client/swagger/models/storage_local_config.go +++ b/client/swagger/models/storage_local_config.go @@ -26,6 +26,9 @@ type StorageLocalConfig struct { // Follow symlinks and copy the pointed to item. CopyLinks *bool `json:"copyLinks,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` @@ -35,6 +38,9 @@ type StorageLocalConfig struct { // Don't check to see if the files change during upload. NoCheckUpdated *bool `json:"noCheckUpdated,omitempty"` + // Disable reflink cloning for server-side copies. + NoClone *bool `json:"noClone,omitempty"` + // Disable preallocation of disk space for transferred files. NoPreallocate *bool `json:"noPreallocate,omitempty"` @@ -54,6 +60,10 @@ type StorageLocalConfig struct { // Don't warn about skipped symlinks. SkipLinks *bool `json:"skipLinks,omitempty"` + // Set what kind of time is returned. + // Example: mtime + TimeType *string `json:"timeType,omitempty"` + // Apply unicode NFC normalization to paths and filenames. UnicodeNormalization *bool `json:"unicodeNormalization,omitempty"` diff --git a/client/swagger/models/storage_mailru_config.go b/client/swagger/models/storage_mailru_config.go index 091e8c5f..3db418ca 100644 --- a/client/swagger/models/storage_mailru_config.go +++ b/client/swagger/models/storage_mailru_config.go @@ -17,10 +17,22 @@ import ( // swagger:model storage.mailruConfig type StorageMailruConfig struct { + // Auth server URL. + AuthURL string `json:"authUrl,omitempty"` + // What should copy do if file checksum is mismatched or invalid. // Example: true CheckHash *bool `json:"checkHash,omitempty"` + // OAuth Client Id. + ClientID string `json:"clientId,omitempty"` + + // OAuth Client Secret. + ClientSecret string `json:"clientSecret,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` @@ -45,6 +57,12 @@ type StorageMailruConfig struct { // Example: 0 SpeedupMaxMemory *string `json:"speedupMaxMemory,omitempty"` + // OAuth Access Token as a JSON blob. + Token string `json:"token,omitempty"` + + // Token server url. + TokenURL string `json:"tokenUrl,omitempty"` + // User name (usually email). User string `json:"user,omitempty"` diff --git a/client/swagger/models/storage_mega_config.go b/client/swagger/models/storage_mega_config.go index 1fe64413..97d9769d 100644 --- a/client/swagger/models/storage_mega_config.go +++ b/client/swagger/models/storage_mega_config.go @@ -20,6 +20,9 @@ type StorageMegaConfig struct { // Output more debug from Mega. Debug *bool `json:"debug,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. 
Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_netstorage_config.go b/client/swagger/models/storage_netstorage_config.go index 005e774d..1c0479cd 100644 --- a/client/swagger/models/storage_netstorage_config.go +++ b/client/swagger/models/storage_netstorage_config.go @@ -20,6 +20,9 @@ type StorageNetstorageConfig struct { // Set the NetStorage account name Account string `json:"account,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Domain+path of NetStorage host to connect to. Host string `json:"host,omitempty"` diff --git a/client/swagger/models/storage_onedrive_config.go b/client/swagger/models/storage_onedrive_config.go index ce8b0a07..e2ad854f 100644 --- a/client/swagger/models/storage_onedrive_config.go +++ b/client/swagger/models/storage_onedrive_config.go @@ -24,6 +24,9 @@ type StorageOnedriveConfig struct { // Auth server URL. AuthURL string `json:"authUrl,omitempty"` + // Allows download of files the server thinks has a virus. + AvOverride *bool `json:"avOverride,omitempty"` + // Chunk size to upload files with - must be multiple of 320k (327,680 bytes). ChunkSize *string `json:"chunkSize,omitempty"` @@ -33,6 +36,12 @@ type StorageOnedriveConfig struct { // OAuth Client Secret. ClientSecret string `json:"clientSecret,omitempty"` + // If set rclone will use delta listing to implement recursive listings. + Delta *bool `json:"delta,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + // Disable the request for Sites.Read.All permission. DisableSitePermission *bool `json:"disableSitePermission,omitempty"` @@ -48,6 +57,9 @@ type StorageOnedriveConfig struct { // Set to make OneNote files show up in directory listings. ExposeOnenoteFiles *bool `json:"exposeOnenoteFiles,omitempty"` + // Permanently delete files on removal. + HardDelete *bool `json:"hardDelete,omitempty"` + // Specify the hash in use for the backend. // Example: auto HashType *string `json:"hashType,omitempty"` @@ -66,6 +78,10 @@ type StorageOnedriveConfig struct { // Size of listing chunk. ListChunk *int64 `json:"listChunk,omitempty"` + // Control whether permissions should be read or written in metadata. + // Example: off + MetadataPermissions *string `json:"metadataPermissions,omitempty"` + // Remove all versions on modifying operations. NoVersions *bool `json:"noVersions,omitempty"` @@ -76,7 +92,7 @@ type StorageOnedriveConfig struct { // ID of the root folder. RootFolderID string `json:"rootFolderId,omitempty"` - // Allow server-side operations (e.g. copy) to work across different onedrive configs. + // Deprecated: use --server-side-across-configs instead. ServerSideAcrossConfigs *bool `json:"serverSideAcrossConfigs,omitempty"` // OAuth Access Token as a JSON blob. diff --git a/client/swagger/models/storage_oos_env_auth_config.go b/client/swagger/models/storage_oos_env_auth_config.go index 8929a3d2..b7457c93 100644 --- a/client/swagger/models/storage_oos_env_auth_config.go +++ b/client/swagger/models/storage_oos_env_auth_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.oosEnv_authConfig type StorageOosEnvAuthConfig struct { + // If true attempt to resume previously started multipart upload for the object. + AttemptResumeUpload *bool `json:"attemptResumeUpload,omitempty"` + // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` @@ -29,6 +32,9 @@ type StorageOosEnvAuthConfig struct { // Timeout for copy. 
CopyTimeout *string `json:"copyTimeout,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -38,9 +44,12 @@ type StorageOosEnvAuthConfig struct { // Endpoint for Object storage API. Endpoint string `json:"endpoint,omitempty"` - // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. LeavePartsOnError *bool `json:"leavePartsOnError,omitempty"` + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + // Object storage namespace Namespace string `json:"namespace,omitempty"` @@ -62,7 +71,7 @@ type StorageOosEnvAuthConfig struct { // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption SseCustomerKeySha256 string `json:"sseCustomerKeySha256,omitempty"` - // if using using your own master key in vault, this header specifies the + // if using your own master key in vault, this header specifies the SseKmsKeyID string `json:"sseKmsKeyId,omitempty"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm diff --git a/client/swagger/models/storage_oos_instance_principal_auth_config.go b/client/swagger/models/storage_oos_instance_principal_auth_config.go index c0bcff03..a31d97b0 100644 --- a/client/swagger/models/storage_oos_instance_principal_auth_config.go +++ b/client/swagger/models/storage_oos_instance_principal_auth_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.oosInstance_principal_authConfig type StorageOosInstancePrincipalAuthConfig struct { + // If true attempt to resume previously started multipart upload for the object. + AttemptResumeUpload *bool `json:"attemptResumeUpload,omitempty"` + // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` @@ -29,6 +32,9 @@ type StorageOosInstancePrincipalAuthConfig struct { // Timeout for copy. CopyTimeout *string `json:"copyTimeout,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -38,9 +44,12 @@ type StorageOosInstancePrincipalAuthConfig struct { // Endpoint for Object storage API. Endpoint string `json:"endpoint,omitempty"` - // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. LeavePartsOnError *bool `json:"leavePartsOnError,omitempty"` + // Maximum number of parts in a multipart upload. 
+ MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + // Object storage namespace Namespace string `json:"namespace,omitempty"` @@ -62,7 +71,7 @@ type StorageOosInstancePrincipalAuthConfig struct { // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption SseCustomerKeySha256 string `json:"sseCustomerKeySha256,omitempty"` - // if using using your own master key in vault, this header specifies the + // if using your own master key in vault, this header specifies the SseKmsKeyID string `json:"sseKmsKeyId,omitempty"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm diff --git a/client/swagger/models/storage_oos_no_auth_config.go b/client/swagger/models/storage_oos_no_auth_config.go index fd20873f..e0e53bf5 100644 --- a/client/swagger/models/storage_oos_no_auth_config.go +++ b/client/swagger/models/storage_oos_no_auth_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.oosNo_authConfig type StorageOosNoAuthConfig struct { + // If true attempt to resume previously started multipart upload for the object. + AttemptResumeUpload *bool `json:"attemptResumeUpload,omitempty"` + // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` @@ -26,6 +29,9 @@ type StorageOosNoAuthConfig struct { // Timeout for copy. CopyTimeout *string `json:"copyTimeout,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -35,9 +41,12 @@ type StorageOosNoAuthConfig struct { // Endpoint for Object storage API. Endpoint string `json:"endpoint,omitempty"` - // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. LeavePartsOnError *bool `json:"leavePartsOnError,omitempty"` + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + // Object storage namespace Namespace string `json:"namespace,omitempty"` @@ -59,7 +68,7 @@ type StorageOosNoAuthConfig struct { // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption SseCustomerKeySha256 string `json:"sseCustomerKeySha256,omitempty"` - // if using using your own master key in vault, this header specifies the + // if using your own master key in vault, this header specifies the SseKmsKeyID string `json:"sseKmsKeyId,omitempty"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm diff --git a/client/swagger/models/storage_oos_resource_principal_auth_config.go b/client/swagger/models/storage_oos_resource_principal_auth_config.go index c9e5dfc3..d4ec79a3 100644 --- a/client/swagger/models/storage_oos_resource_principal_auth_config.go +++ b/client/swagger/models/storage_oos_resource_principal_auth_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.oosResource_principal_authConfig type StorageOosResourcePrincipalAuthConfig struct { + // If true attempt to resume previously started multipart upload for the object. + AttemptResumeUpload *bool `json:"attemptResumeUpload,omitempty"` + // Chunk size to use for uploading. 
ChunkSize *string `json:"chunkSize,omitempty"` @@ -29,6 +32,9 @@ type StorageOosResourcePrincipalAuthConfig struct { // Timeout for copy. CopyTimeout *string `json:"copyTimeout,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -38,9 +44,12 @@ type StorageOosResourcePrincipalAuthConfig struct { // Endpoint for Object storage API. Endpoint string `json:"endpoint,omitempty"` - // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. LeavePartsOnError *bool `json:"leavePartsOnError,omitempty"` + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + // Object storage namespace Namespace string `json:"namespace,omitempty"` @@ -62,7 +71,7 @@ type StorageOosResourcePrincipalAuthConfig struct { // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption SseCustomerKeySha256 string `json:"sseCustomerKeySha256,omitempty"` - // if using using your own master key in vault, this header specifies the + // if using your own master key in vault, this header specifies the SseKmsKeyID string `json:"sseKmsKeyId,omitempty"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm diff --git a/client/swagger/models/storage_oos_user_principal_auth_config.go b/client/swagger/models/storage_oos_user_principal_auth_config.go index 09869ffb..75bd0e15 100644 --- a/client/swagger/models/storage_oos_user_principal_auth_config.go +++ b/client/swagger/models/storage_oos_user_principal_auth_config.go @@ -17,6 +17,9 @@ import ( // swagger:model storage.oosUser_principal_authConfig type StorageOosUserPrincipalAuthConfig struct { + // If true attempt to resume previously started multipart upload for the object. + AttemptResumeUpload *bool `json:"attemptResumeUpload,omitempty"` + // Chunk size to use for uploading. ChunkSize *string `json:"chunkSize,omitempty"` @@ -37,6 +40,9 @@ type StorageOosUserPrincipalAuthConfig struct { // Timeout for copy. CopyTimeout *string `json:"copyTimeout,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -46,9 +52,12 @@ type StorageOosUserPrincipalAuthConfig struct { // Endpoint for Object storage API. Endpoint string `json:"endpoint,omitempty"` - // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. LeavePartsOnError *bool `json:"leavePartsOnError,omitempty"` + // Maximum number of parts in a multipart upload. 
+ MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + // Object storage namespace Namespace string `json:"namespace,omitempty"` @@ -70,7 +79,7 @@ type StorageOosUserPrincipalAuthConfig struct { // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption SseCustomerKeySha256 string `json:"sseCustomerKeySha256,omitempty"` - // if using using your own master key in vault, this header specifies the + // if using your own master key in vault, this header specifies the SseKmsKeyID string `json:"sseKmsKeyId,omitempty"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm diff --git a/client/swagger/models/storage_oos_workload_identity_auth_config.go b/client/swagger/models/storage_oos_workload_identity_auth_config.go new file mode 100644 index 00000000..58a4ba8c --- /dev/null +++ b/client/swagger/models/storage_oos_workload_identity_auth_config.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageOosWorkloadIdentityAuthConfig storage oos workload identity auth config +// +// swagger:model storage.oosWorkload_identity_authConfig +type StorageOosWorkloadIdentityAuthConfig struct { + + // If true attempt to resume previously started multipart upload for the object. + AttemptResumeUpload *bool `json:"attemptResumeUpload,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Object storage compartment OCID + Compartment string `json:"compartment,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // Timeout for copy. + CopyTimeout *string `json:"copyTimeout,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for Object storage API. + Endpoint string `json:"endpoint,omitempty"` + + // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. + LeavePartsOnError *bool `json:"leavePartsOnError,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // Object storage namespace + Namespace string `json:"namespace,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // Object storage Region + Region string `json:"region,omitempty"` + + // If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. 
+ SseCustomerAlgorithm string `json:"sseCustomerAlgorithm,omitempty"` + + // To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to + SseCustomerKey string `json:"sseCustomerKey,omitempty"` + + // To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated + SseCustomerKeyFile string `json:"sseCustomerKeyFile,omitempty"` + + // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption + SseCustomerKeySha256 string `json:"sseCustomerKeySha256,omitempty"` + + // if using your own master key in vault, this header specifies the + SseKmsKeyID string `json:"sseKmsKeyId,omitempty"` + + // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm + // Example: Standard + StorageTier *string `json:"storageTier,omitempty"` + + // Concurrency for multipart uploads. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` +} + +// Validate validates this storage oos workload identity auth config +func (m *StorageOosWorkloadIdentityAuthConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage oos workload identity auth config based on context it is used +func (m *StorageOosWorkloadIdentityAuthConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageOosWorkloadIdentityAuthConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageOosWorkloadIdentityAuthConfig) UnmarshalBinary(b []byte) error { + var res StorageOosWorkloadIdentityAuthConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_opendrive_config.go b/client/swagger/models/storage_opendrive_config.go index 027768f9..7bb11533 100644 --- a/client/swagger/models/storage_opendrive_config.go +++ b/client/swagger/models/storage_opendrive_config.go @@ -20,6 +20,9 @@ type StorageOpendriveConfig struct { // Files will be uploaded in chunks this size. ChunkSize *string `json:"chunkSize,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_pcloud_config.go b/client/swagger/models/storage_pcloud_config.go index 9467fbd3..bf27af88 100644 --- a/client/swagger/models/storage_pcloud_config.go +++ b/client/swagger/models/storage_pcloud_config.go @@ -26,6 +26,9 @@ type StoragePcloudConfig struct { // OAuth Client Secret. ClientSecret string `json:"clientSecret,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_premiumizeme_config.go b/client/swagger/models/storage_premiumizeme_config.go index d4af2ad7..cbda1870 100644 --- a/client/swagger/models/storage_premiumizeme_config.go +++ b/client/swagger/models/storage_premiumizeme_config.go @@ -20,8 +20,26 @@ type StoragePremiumizemeConfig struct { // API Key. APIKey string `json:"apiKey,omitempty"` + // Auth server URL. 
+ AuthURL string `json:"authUrl,omitempty"` + + // OAuth Client Id. + ClientID string `json:"clientId,omitempty"` + + // OAuth Client Secret. + ClientSecret string `json:"clientSecret,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` + + // OAuth Access Token as a JSON blob. + Token string `json:"token,omitempty"` + + // Token server url. + TokenURL string `json:"tokenUrl,omitempty"` } // Validate validates this storage premiumizeme config diff --git a/client/swagger/models/storage_putio_config.go b/client/swagger/models/storage_putio_config.go index 5906ae7e..9c3b6b4b 100644 --- a/client/swagger/models/storage_putio_config.go +++ b/client/swagger/models/storage_putio_config.go @@ -17,8 +17,26 @@ import ( // swagger:model storage.putioConfig type StoragePutioConfig struct { + // Auth server URL. + AuthURL string `json:"authUrl,omitempty"` + + // OAuth Client Id. + ClientID string `json:"clientId,omitempty"` + + // OAuth Client Secret. + ClientSecret string `json:"clientSecret,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` + + // OAuth Access Token as a JSON blob. + Token string `json:"token,omitempty"` + + // Token server url. + TokenURL string `json:"tokenUrl,omitempty"` } // Validate validates this storage putio config diff --git a/client/swagger/models/storage_qingstor_config.go b/client/swagger/models/storage_qingstor_config.go index 1d56368f..2fcbf7b5 100644 --- a/client/swagger/models/storage_qingstor_config.go +++ b/client/swagger/models/storage_qingstor_config.go @@ -26,6 +26,9 @@ type StorageQingstorConfig struct { // Number of connection retries. ConnectionRetries *int64 `json:"connectionRetries,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_s3_a_w_s_config.go b/client/swagger/models/storage_s3_a_w_s_config.go index 5caef943..6f14973d 100644 --- a/client/swagger/models/storage_s3_a_w_s_config.go +++ b/client/swagger/models/storage_s3_a_w_s_config.go @@ -36,6 +36,12 @@ type StorageS3AWSConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -76,10 +82,10 @@ type StorageS3AWSConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -107,6 +113,9 @@ type StorageS3AWSConfig struct { // Enables requester pays option when interacting with S3 bucket. 
RequesterPays *bool `json:"requesterPays,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -137,10 +146,10 @@ type StorageS3AWSConfig struct { // The storage class to use when storing new objects in S3. StorageClass string `json:"storageClass,omitempty"` - // Endpoint for STS. + // Endpoint for STS (deprecated). StsEndpoint string `json:"stsEndpoint,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. @@ -149,18 +158,36 @@ type StorageS3AWSConfig struct { // If true use the AWS S3 accelerated endpoint. UseAccelerateEndpoint *bool `json:"useAccelerateEndpoint,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_alibaba_config.go b/client/swagger/models/storage_s3_alibaba_config.go index 03dd8576..3851a83b 100644 --- a/client/swagger/models/storage_s3_alibaba_config.go +++ b/client/swagger/models/storage_s3_alibaba_config.go @@ -36,6 +36,12 @@ type StorageS3AlibabaConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -71,10 +77,10 @@ type StorageS3AlibabaConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. 
@@ -95,6 +101,9 @@ type StorageS3AlibabaConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -107,24 +116,42 @@ type StorageS3AlibabaConfig struct { // The storage class to use when storing new objects in OSS. StorageClass string `json:"storageClass,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_arvan_cloud_config.go b/client/swagger/models/storage_s3_arvan_cloud_config.go index d3f66590..85a9aea1 100644 --- a/client/swagger/models/storage_s3_arvan_cloud_config.go +++ b/client/swagger/models/storage_s3_arvan_cloud_config.go @@ -36,6 +36,12 @@ type StorageS3ArvanCloudConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -49,7 +55,7 @@ type StorageS3ArvanCloudConfig struct { Encoding *string `json:"encoding,omitempty"` // Endpoint for Arvan Cloud Object Storage (AOS) API. - // Example: s3.ir-thr-at1.arvanstorage.com + // Example: s3.ir-thr-at1.arvanstorage.ir Endpoint string `json:"endpoint,omitempty"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). @@ -75,10 +81,10 @@ type StorageS3ArvanCloudConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. 
(no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -99,6 +105,9 @@ type StorageS3ArvanCloudConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -112,24 +121,42 @@ type StorageS3ArvanCloudConfig struct { // Example: STANDARD StorageClass string `json:"storageClass,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_ceph_config.go b/client/swagger/models/storage_s3_ceph_config.go index 7d251158..748c80a7 100644 --- a/client/swagger/models/storage_s3_ceph_config.go +++ b/client/swagger/models/storage_s3_ceph_config.go @@ -36,6 +36,12 @@ type StorageS3CephConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -73,10 +79,10 @@ type StorageS3CephConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. 
(no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -100,6 +106,9 @@ type StorageS3CephConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -127,24 +136,42 @@ type StorageS3CephConfig struct { // If using KMS ID you must provide the ARN of Key. SseKmsKeyID string `json:"sseKmsKeyId,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_china_mobile_config.go b/client/swagger/models/storage_s3_china_mobile_config.go index e2a69b84..55ece722 100644 --- a/client/swagger/models/storage_s3_china_mobile_config.go +++ b/client/swagger/models/storage_s3_china_mobile_config.go @@ -36,6 +36,12 @@ type StorageS3ChinaMobileConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -75,10 +81,10 @@ type StorageS3ChinaMobileConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. 
@@ -99,6 +105,9 @@ type StorageS3ChinaMobileConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -126,24 +135,42 @@ type StorageS3ChinaMobileConfig struct { // The storage class to use when storing new objects in ChinaMobile. StorageClass string `json:"storageClass,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_cloudflare_config.go b/client/swagger/models/storage_s3_cloudflare_config.go index 209b1c39..5ac2d2ba 100644 --- a/client/swagger/models/storage_s3_cloudflare_config.go +++ b/client/swagger/models/storage_s3_cloudflare_config.go @@ -33,6 +33,12 @@ type StorageS3CloudflareConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -67,10 +73,10 @@ type StorageS3CloudflareConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. 
@@ -95,6 +101,9 @@ type StorageS3CloudflareConfig struct { // Example: auto Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -104,24 +113,42 @@ type StorageS3CloudflareConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_digital_ocean_config.go b/client/swagger/models/storage_s3_digital_ocean_config.go index c4c99921..f73c444c 100644 --- a/client/swagger/models/storage_s3_digital_ocean_config.go +++ b/client/swagger/models/storage_s3_digital_ocean_config.go @@ -36,6 +36,12 @@ type StorageS3DigitalOceanConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -74,10 +80,10 @@ type StorageS3DigitalOceanConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -101,6 +107,9 @@ type StorageS3DigitalOceanConfig struct { // Region to connect to. 
Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -110,24 +119,42 @@ type StorageS3DigitalOceanConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_dreamhost_config.go b/client/swagger/models/storage_s3_dreamhost_config.go index b6a05c20..6e7d903a 100644 --- a/client/swagger/models/storage_s3_dreamhost_config.go +++ b/client/swagger/models/storage_s3_dreamhost_config.go @@ -36,6 +36,12 @@ type StorageS3DreamhostConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -74,10 +80,10 @@ type StorageS3DreamhostConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -101,6 +107,9 @@ type StorageS3DreamhostConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). 
SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -110,24 +119,42 @@ type StorageS3DreamhostConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_g_c_s_config.go b/client/swagger/models/storage_s3_g_c_s_config.go new file mode 100644 index 00000000..15bdadd1 --- /dev/null +++ b/client/swagger/models/storage_s3_g_c_s_config.go @@ -0,0 +1,188 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3GCSConfig storage s3 g c s config +// +// swagger:model storage.s3GCSConfig +type StorageS3GCSConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. 
+ DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for Google Cloud Storage. + // Example: https://storage.googleapis.com + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Location constraint - must be set to match the Region. + LocationConstraint string `json:"locationConstraint,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + Region string `json:"region,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. + Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 g c s config +func (m *StorageS3GCSConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 g c s config based on context it is used +func (m *StorageS3GCSConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3GCSConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3GCSConfig) UnmarshalBinary(b []byte) error { + var res StorageS3GCSConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_huawei_o_b_s_config.go b/client/swagger/models/storage_s3_huawei_o_b_s_config.go index 020bccae..e5b78005 100644 --- a/client/swagger/models/storage_s3_huawei_o_b_s_config.go +++ b/client/swagger/models/storage_s3_huawei_o_b_s_config.go @@ -36,6 +36,12 @@ type StorageS3HuaweiOBSConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -71,10 +77,10 @@ type StorageS3HuaweiOBSConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -99,6 +105,9 @@ type StorageS3HuaweiOBSConfig struct { // Example: af-south-1 Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -108,24 +117,42 @@ type StorageS3HuaweiOBSConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. 
UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_i_b_m_c_o_s_config.go b/client/swagger/models/storage_s3_i_b_m_c_o_s_config.go index 2a3fcf68..eedacde0 100644 --- a/client/swagger/models/storage_s3_i_b_m_c_o_s_config.go +++ b/client/swagger/models/storage_s3_i_b_m_c_o_s_config.go @@ -37,6 +37,12 @@ type StorageS3IBMCOSConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -76,10 +82,10 @@ type StorageS3IBMCOSConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -103,6 +109,9 @@ type StorageS3IBMCOSConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -112,24 +121,42 @@ type StorageS3IBMCOSConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. 
+ UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_i_drive_config.go b/client/swagger/models/storage_s3_i_drive_config.go index 4045decd..94110761 100644 --- a/client/swagger/models/storage_s3_i_drive_config.go +++ b/client/swagger/models/storage_s3_i_drive_config.go @@ -36,6 +36,12 @@ type StorageS3IDriveConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -67,10 +73,10 @@ type StorageS3IDriveConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -91,6 +97,9 @@ type StorageS3IDriveConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -100,24 +109,42 @@ type StorageS3IDriveConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_i_o_n_o_s_config.go b/client/swagger/models/storage_s3_i_o_n_o_s_config.go index 2dd3849f..441e5c1d 100644 --- a/client/swagger/models/storage_s3_i_o_n_o_s_config.go +++ b/client/swagger/models/storage_s3_i_o_n_o_s_config.go @@ -36,6 +36,12 @@ type StorageS3IONOSConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -71,10 +77,10 @@ type StorageS3IONOSConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -99,6 +105,9 @@ type StorageS3IONOSConfig struct { // Example: de Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -108,24 +117,42 @@ type StorageS3IONOSConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_leviia_config.go b/client/swagger/models/storage_s3_leviia_config.go new file mode 100644 index 00000000..f53de1a2 --- /dev/null +++ b/client/swagger/models/storage_s3_leviia_config.go @@ -0,0 +1,184 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3LeviiaConfig storage s3 leviia config +// +// swagger:model storage.s3LeviiaConfig +type StorageS3LeviiaConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. 
+ ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + Region string `json:"region,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 leviia config +func (m *StorageS3LeviiaConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 leviia config based on context it is used +func (m *StorageS3LeviiaConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3LeviiaConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3LeviiaConfig) UnmarshalBinary(b []byte) error { + var res StorageS3LeviiaConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_liara_config.go b/client/swagger/models/storage_s3_liara_config.go index a3c79bde..7331d7fb 100644 --- a/client/swagger/models/storage_s3_liara_config.go +++ b/client/swagger/models/storage_s3_liara_config.go @@ -36,6 +36,12 @@ type StorageS3LiaraConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -71,10 +77,10 @@ type StorageS3LiaraConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -95,6 +101,9 @@ type StorageS3LiaraConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -108,24 +117,42 @@ type StorageS3LiaraConfig struct { // Example: STANDARD StorageClass string `json:"storageClass,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_linode_config.go b/client/swagger/models/storage_s3_linode_config.go new file mode 100644 index 00000000..c97c205a --- /dev/null +++ b/client/swagger/models/storage_s3_linode_config.go @@ -0,0 +1,182 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3LinodeConfig storage s3 linode config +// +// swagger:model storage.s3LinodeConfig +type StorageS3LinodeConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for Linode Object Storage API. + // Example: us-southeast-1.linodeobjects.com + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. 
+ MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 linode config +func (m *StorageS3LinodeConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 linode config based on context it is used +func (m *StorageS3LinodeConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3LinodeConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3LinodeConfig) UnmarshalBinary(b []byte) error { + var res StorageS3LinodeConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_lyve_cloud_config.go b/client/swagger/models/storage_s3_lyve_cloud_config.go index fc3d3b71..99cb7f3e 100644 --- a/client/swagger/models/storage_s3_lyve_cloud_config.go +++ b/client/swagger/models/storage_s3_lyve_cloud_config.go @@ -36,6 +36,12 @@ type StorageS3LyveCloudConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -74,10 +80,10 @@ type StorageS3LyveCloudConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -101,6 +107,9 @@ type StorageS3LyveCloudConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -110,24 +119,42 @@ type StorageS3LyveCloudConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_magalu_config.go b/client/swagger/models/storage_s3_magalu_config.go new file mode 100644 index 00000000..66be4ba5 --- /dev/null +++ b/client/swagger/models/storage_s3_magalu_config.go @@ -0,0 +1,186 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3MagaluConfig storage s3 magalu config +// +// swagger:model storage.s3MagaluConfig +type StorageS3MagaluConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + // Example: br-se1.magaluobjects.com + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. 
(no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // The storage class to use when storing new objects in Magalu. + // Example: STANDARD + StorageClass string `json:"storageClass,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 magalu config +func (m *StorageS3MagaluConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 magalu config based on context it is used +func (m *StorageS3MagaluConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3MagaluConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3MagaluConfig) UnmarshalBinary(b []byte) error { + var res StorageS3MagaluConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_minio_config.go b/client/swagger/models/storage_s3_minio_config.go index d90f11cd..e4d035d7 100644 --- a/client/swagger/models/storage_s3_minio_config.go +++ b/client/swagger/models/storage_s3_minio_config.go @@ -36,6 +36,12 @@ type StorageS3MinioConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -73,10 +79,10 @@ type StorageS3MinioConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -100,6 +106,9 @@ type StorageS3MinioConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -127,24 +136,42 @@ type StorageS3MinioConfig struct { // If using KMS ID you must provide the ARN of Key. SseKmsKeyID string `json:"sseKmsKeyId,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_netease_config.go b/client/swagger/models/storage_s3_netease_config.go index 6f2af803..88e60e9c 100644 --- a/client/swagger/models/storage_s3_netease_config.go +++ b/client/swagger/models/storage_s3_netease_config.go @@ -36,6 +36,12 @@ type StorageS3NeteaseConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -73,10 +79,10 @@ type StorageS3NeteaseConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -100,6 +106,9 @@ type StorageS3NeteaseConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -109,24 +118,42 @@ type StorageS3NeteaseConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_other_config.go b/client/swagger/models/storage_s3_other_config.go index ecd9d0a8..82e01886 100644 --- a/client/swagger/models/storage_s3_other_config.go +++ b/client/swagger/models/storage_s3_other_config.go @@ -36,6 +36,12 @@ type StorageS3OtherConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -73,10 +79,10 @@ type StorageS3OtherConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -100,6 +106,9 @@ type StorageS3OtherConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -109,24 +118,42 @@ type StorageS3OtherConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_petabox_config.go b/client/swagger/models/storage_s3_petabox_config.go new file mode 100644 index 00000000..c510ed7e --- /dev/null +++ b/client/swagger/models/storage_s3_petabox_config.go @@ -0,0 +1,186 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3PetaboxConfig storage s3 petabox config +// +// swagger:model storage.s3PetaboxConfig +type StorageS3PetaboxConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for Petabox S3 Object Storage. + // Example: s3.petabox.io + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Maximum number of parts in a multipart upload. + MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. 
(no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region where your bucket will be created and your data stored. + // Example: us-east-1 + Region string `json:"region,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 petabox config +func (m *StorageS3PetaboxConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 petabox config based on context it is used +func (m *StorageS3PetaboxConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3PetaboxConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3PetaboxConfig) UnmarshalBinary(b []byte) error { + var res StorageS3PetaboxConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_qiniu_config.go b/client/swagger/models/storage_s3_qiniu_config.go index 21578eee..d9ddd0fe 100644 --- a/client/swagger/models/storage_s3_qiniu_config.go +++ b/client/swagger/models/storage_s3_qiniu_config.go @@ -36,6 +36,12 @@ type StorageS3QiniuConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -75,10 +81,10 @@ type StorageS3QiniuConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -103,6 +109,9 @@ type StorageS3QiniuConfig struct { // Example: cn-east-1 Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -116,24 +125,42 @@ type StorageS3QiniuConfig struct { // Example: STANDARD StorageClass string `json:"storageClass,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_rack_corp_config.go b/client/swagger/models/storage_s3_rack_corp_config.go index 0d393771..90e63acf 100644 --- a/client/swagger/models/storage_s3_rack_corp_config.go +++ b/client/swagger/models/storage_s3_rack_corp_config.go @@ -36,6 +36,12 @@ type StorageS3RackCorpConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -75,10 +81,10 @@ type StorageS3RackCorpConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -103,6 +109,9 @@ type StorageS3RackCorpConfig struct { // Example: global Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -112,24 +121,42 @@ type StorageS3RackCorpConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_rclone_config.go b/client/swagger/models/storage_s3_rclone_config.go new file mode 100644 index 00000000..f7cbd1ee --- /dev/null +++ b/client/swagger/models/storage_s3_rclone_config.go @@ -0,0 +1,187 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3RcloneConfig storage s3 rclone config +// +// swagger:model storage.s3RcloneConfig +type StorageS3RcloneConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets and storing or copying objects. + ACL string `json:"acl,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for S3 API. + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Location constraint - must be set to match the Region. + LocationConstraint string `json:"locationConstraint,omitempty"` + + // Maximum number of parts in a multipart upload. 
+ MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region to connect to. + Region string `json:"region,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 rclone config +func (m *StorageS3RcloneConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 rclone config based on context it is used +func (m *StorageS3RcloneConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3RcloneConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3RcloneConfig) UnmarshalBinary(b []byte) error { + var res StorageS3RcloneConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_scaleway_config.go b/client/swagger/models/storage_s3_scaleway_config.go index 13cb5d32..67291be6 100644 --- a/client/swagger/models/storage_s3_scaleway_config.go +++ b/client/swagger/models/storage_s3_scaleway_config.go @@ -36,6 +36,12 @@ type StorageS3ScalewayConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -71,10 +77,10 @@ type StorageS3ScalewayConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -99,6 +105,9 @@ type StorageS3ScalewayConfig struct { // Example: nl-ams Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -111,24 +120,42 @@ type StorageS3ScalewayConfig struct { // The storage class to use when storing new objects in S3. StorageClass string `json:"storageClass,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_seaweed_f_s_config.go b/client/swagger/models/storage_s3_seaweed_f_s_config.go index 622e9944..812f2f10 100644 --- a/client/swagger/models/storage_s3_seaweed_f_s_config.go +++ b/client/swagger/models/storage_s3_seaweed_f_s_config.go @@ -36,6 +36,12 @@ type StorageS3SeaweedFSConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -74,10 +80,10 @@ type StorageS3SeaweedFSConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -101,6 +107,9 @@ type StorageS3SeaweedFSConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -110,24 +119,42 @@ type StorageS3SeaweedFSConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_stack_path_config.go b/client/swagger/models/storage_s3_stack_path_config.go index 0da1c593..48d24a01 100644 --- a/client/swagger/models/storage_s3_stack_path_config.go +++ b/client/swagger/models/storage_s3_stack_path_config.go @@ -36,6 +36,12 @@ type StorageS3StackPathConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -71,10 +77,10 @@ type StorageS3StackPathConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -98,6 +104,9 @@ type StorageS3StackPathConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -107,24 +116,42 @@ type StorageS3StackPathConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_storj_config.go b/client/swagger/models/storage_s3_storj_config.go index eb6bfbde..fbfc4fb3 100644 --- a/client/swagger/models/storage_s3_storj_config.go +++ b/client/swagger/models/storage_s3_storj_config.go @@ -33,6 +33,12 @@ type StorageS3StorjConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -68,10 +74,10 @@ type StorageS3StorjConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -92,6 +98,9 @@ type StorageS3StorjConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -101,24 +110,42 @@ type StorageS3StorjConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_synology_config.go b/client/swagger/models/storage_s3_synology_config.go new file mode 100644 index 00000000..29b2a083 --- /dev/null +++ b/client/swagger/models/storage_s3_synology_config.go @@ -0,0 +1,186 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StorageS3SynologyConfig storage s3 synology config +// +// swagger:model storage.s3SynologyConfig +type StorageS3SynologyConfig struct { + + // AWS Access Key ID. + AccessKeyID string `json:"accessKeyId,omitempty"` + + // Canned ACL used when creating buckets. + // Example: private + BucketACL string `json:"bucketAcl,omitempty"` + + // Chunk size to use for uploading. + ChunkSize *string `json:"chunkSize,omitempty"` + + // Cutoff for switching to multipart copy. + CopyCutoff *string `json:"copyCutoff,omitempty"` + + // If set this will decompress gzip encoded objects. + Decompress *bool `json:"decompress,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + + // Don't store MD5 checksum with object metadata. + DisableChecksum *bool `json:"disableChecksum,omitempty"` + + // Disable usage of http2 for S3 backends. + DisableHttp2 *bool `json:"disableHttp2,omitempty"` + + // Custom endpoint for downloads. + DownloadURL string `json:"downloadUrl,omitempty"` + + // The encoding for the backend. + Encoding *string `json:"encoding,omitempty"` + + // Endpoint for Synology C2 Object Storage API. + // Example: eu-001.s3.synologyc2.net + Endpoint string `json:"endpoint,omitempty"` + + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Example: false + EnvAuth *bool `json:"envAuth,omitempty"` + + // If true use path style access if false use virtual hosted style. + ForcePathStyle *bool `json:"forcePathStyle,omitempty"` + + // Size of listing chunk (response list for each ListObject S3 request). + ListChunk *int64 `json:"listChunk,omitempty"` + + // Whether to url encode listings: true/false/unset + ListURLEncode *string `json:"listUrlEncode,omitempty"` + + // Version of ListObjects to use: 1,2 or 0 for auto. + ListVersion int64 `json:"listVersion,omitempty"` + + // Location constraint - must be set to match the Region. + LocationConstraint string `json:"locationConstraint,omitempty"` + + // Maximum number of parts in a multipart upload. 
+ MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` + + // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` + + // Whether to use mmap buffers in internal memory pool. (no longer used) + MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` + + // Set this if the backend might gzip objects. + MightGzip *string `json:"mightGzip,omitempty"` + + // If set, don't attempt to check the bucket exists or create it. + NoCheckBucket *bool `json:"noCheckBucket,omitempty"` + + // If set, don't HEAD uploaded objects to check integrity. + NoHead *bool `json:"noHead,omitempty"` + + // If set, do not do HEAD before GET when getting objects. + NoHeadObject *bool `json:"noHeadObject,omitempty"` + + // Suppress setting and reading of system metadata + NoSystemMetadata *bool `json:"noSystemMetadata,omitempty"` + + // Profile to use in the shared credentials file. + Profile string `json:"profile,omitempty"` + + // Region where your data stored. + // Example: eu-001 + Region string `json:"region,omitempty"` + + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + + // AWS Secret Access Key (password). + SecretAccessKey string `json:"secretAccessKey,omitempty"` + + // An AWS session token. + SessionToken string `json:"sessionToken,omitempty"` + + // Path to the shared credentials file. + SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` + + // Concurrency for multipart uploads and copies. + UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` + + // Cutoff for switching to chunked upload. + UploadCutoff *string `json:"uploadCutoff,omitempty"` + + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + + // Whether to use ETag in multipart uploads for verification + UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + + // Set if rclone should use multipart uploads. + UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + + // Whether to use a presigned request or PutObject for single part uploads + UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + + // If true use v2 authentication. + V2Auth *bool `json:"v2Auth,omitempty"` + + // Show file versions as they were at the specified time. + VersionAt *string `json:"versionAt,omitempty"` + + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + + // Include old versions in directory listings. 
+ Versions *bool `json:"versions,omitempty"` +} + +// Validate validates this storage s3 synology config +func (m *StorageS3SynologyConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this storage s3 synology config based on context it is used +func (m *StorageS3SynologyConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StorageS3SynologyConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StorageS3SynologyConfig) UnmarshalBinary(b []byte) error { + var res StorageS3SynologyConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/client/swagger/models/storage_s3_tencent_c_o_s_config.go b/client/swagger/models/storage_s3_tencent_c_o_s_config.go index f957d928..3eec345b 100644 --- a/client/swagger/models/storage_s3_tencent_c_o_s_config.go +++ b/client/swagger/models/storage_s3_tencent_c_o_s_config.go @@ -37,6 +37,12 @@ type StorageS3TencentCOSConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -72,10 +78,10 @@ type StorageS3TencentCOSConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -96,6 +102,9 @@ type StorageS3TencentCOSConfig struct { // Profile to use in the shared credentials file. Profile string `json:"profile,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -108,24 +117,42 @@ type StorageS3TencentCOSConfig struct { // The storage class to use when storing new objects in Tencent COS. StorageClass string `json:"storageClass,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_s3_wasabi_config.go b/client/swagger/models/storage_s3_wasabi_config.go index c335e8b4..b2c6ef7c 100644 --- a/client/swagger/models/storage_s3_wasabi_config.go +++ b/client/swagger/models/storage_s3_wasabi_config.go @@ -36,6 +36,12 @@ type StorageS3WasabiConfig struct { // If set this will decompress gzip encoded objects. Decompress *bool `json:"decompress,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + + // Upload an empty object with a trailing slash when a new directory is created + DirectoryMarkers *bool `json:"directoryMarkers,omitempty"` + // Don't store MD5 checksum with object metadata. DisableChecksum *bool `json:"disableChecksum,omitempty"` @@ -74,10 +80,10 @@ type StorageS3WasabiConfig struct { // Maximum number of parts in a multipart upload. MaxUploadParts *int64 `json:"maxUploadParts,omitempty"` - // How often internal memory buffer pools will be flushed. + // How often internal memory buffer pools will be flushed. (no longer used) MemoryPoolFlushTime *string `json:"memoryPoolFlushTime,omitempty"` - // Whether to use mmap buffers in internal memory pool. + // Whether to use mmap buffers in internal memory pool. (no longer used) MemoryPoolUseMmap *bool `json:"memoryPoolUseMmap,omitempty"` // Set this if the backend might gzip objects. @@ -101,6 +107,9 @@ type StorageS3WasabiConfig struct { // Region to connect to. Region string `json:"region,omitempty"` + // Set to debug the SDK + SdkLogMode *string `json:"sdkLogMode,omitempty"` + // AWS Secret Access Key (password). SecretAccessKey string `json:"secretAccessKey,omitempty"` @@ -110,24 +119,42 @@ type StorageS3WasabiConfig struct { // Path to the shared credentials file. SharedCredentialsFile string `json:"sharedCredentialsFile,omitempty"` - // Concurrency for multipart uploads. + // Concurrency for multipart uploads and copies. UploadConcurrency *int64 `json:"uploadConcurrency,omitempty"` // Cutoff for switching to chunked upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` + // Whether to send `Accept-Encoding: gzip` header. + UseAcceptEncodingGzip *string `json:"useAcceptEncodingGzip,omitempty"` + + // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseAlreadyExists *string `json:"useAlreadyExists,omitempty"` + + // If true use AWS S3 dual-stack endpoint (IPv6 support). + UseDualStack *bool `json:"useDualStack,omitempty"` + // Whether to use ETag in multipart uploads for verification UseMultipartEtag *string `json:"useMultipartEtag,omitempty"` + // Set if rclone should use multipart uploads. 
+ UseMultipartUploads *string `json:"useMultipartUploads,omitempty"` + // Whether to use a presigned request or PutObject for single part uploads UsePresignedRequest *bool `json:"usePresignedRequest,omitempty"` + // Whether to use an unsigned payload in PutObject + UseUnsignedPayload *string `json:"useUnsignedPayload,omitempty"` + // If true use v2 authentication. V2Auth *bool `json:"v2Auth,omitempty"` // Show file versions as they were at the specified time. VersionAt *string `json:"versionAt,omitempty"` + // Show deleted file markers when using versions. + VersionDeleted *bool `json:"versionDeleted,omitempty"` + // Include old versions in directory listings. Versions *bool `json:"versions,omitempty"` } diff --git a/client/swagger/models/storage_seafile_config.go b/client/swagger/models/storage_seafile_config.go index f29a2c4f..55542257 100644 --- a/client/swagger/models/storage_seafile_config.go +++ b/client/swagger/models/storage_seafile_config.go @@ -26,6 +26,9 @@ type StorageSeafileConfig struct { // Should rclone create a library if it doesn't exist. CreateLibrary *bool `json:"createLibrary,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_sftp_config.go b/client/swagger/models/storage_sftp_config.go index 92e44586..cd21de95 100644 --- a/client/swagger/models/storage_sftp_config.go +++ b/client/swagger/models/storage_sftp_config.go @@ -29,6 +29,15 @@ type StorageSftpConfig struct { // The maximum number of outstanding requests for one file Concurrency *int64 `json:"concurrency,omitempty"` + // Maximum number of SFTP simultaneous connections, 0 for unlimited. + Connections int64 `json:"connections,omitempty"` + + // Set to enable server side copies using hardlinks. + CopyIsHardlink *bool `json:"copyIsHardlink,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + // If set don't use concurrent reads. DisableConcurrentReads *bool `json:"disableConcurrentReads,omitempty"` @@ -41,6 +50,9 @@ type StorageSftpConfig struct { // SSH host to connect to. Host string `json:"host,omitempty"` + // Space separated list of host key algorithms, ordered by preference. + HostKeyAlgorithms string `json:"hostKeyAlgorithms,omitempty"` + // Max time before closing idle connections. IdleTimeout *string `json:"idleTimeout,omitempty"` @@ -100,6 +112,12 @@ type StorageSftpConfig struct { // Set to skip any symlinks and any other non regular files. SkipLinks *bool `json:"skipLinks,omitempty"` + // Socks 5 proxy host. + SocksProxy string `json:"socksProxy,omitempty"` + + // Path and arguments to external ssh binary. + SSH string `json:"ssh,omitempty"` + // Specifies the SSH2 subsystem on the remote host. Subsystem *string `json:"subsystem,omitempty"` diff --git a/client/swagger/models/storage_sharefile_config.go b/client/swagger/models/storage_sharefile_config.go index e2cdf2fd..e608f8f5 100644 --- a/client/swagger/models/storage_sharefile_config.go +++ b/client/swagger/models/storage_sharefile_config.go @@ -17,9 +17,21 @@ import ( // swagger:model storage.sharefileConfig type StorageSharefileConfig struct { + // Auth server URL. + AuthURL string `json:"authUrl,omitempty"` + // Upload chunk size. ChunkSize *string `json:"chunkSize,omitempty"` + // OAuth Client Id. + ClientID string `json:"clientId,omitempty"` + + // OAuth Client Secret. 
+ ClientSecret string `json:"clientSecret,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` @@ -29,6 +41,12 @@ type StorageSharefileConfig struct { // ID of the root folder. RootFolderID string `json:"rootFolderId,omitempty"` + // OAuth Access Token as a JSON blob. + Token string `json:"token,omitempty"` + + // Token server url. + TokenURL string `json:"tokenUrl,omitempty"` + // Cutoff for switching to multipart upload. UploadCutoff *string `json:"uploadCutoff,omitempty"` } diff --git a/client/swagger/models/storage_sia_config.go b/client/swagger/models/storage_sia_config.go index 902d3600..7cda4d7a 100644 --- a/client/swagger/models/storage_sia_config.go +++ b/client/swagger/models/storage_sia_config.go @@ -23,6 +23,9 @@ type StorageSiaConfig struct { // Sia daemon API URL, like http://sia.daemon.host:9980. APIURL *string `json:"apiUrl,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_smb_config.go b/client/swagger/models/storage_smb_config.go index a921d4bc..3170bfff 100644 --- a/client/swagger/models/storage_smb_config.go +++ b/client/swagger/models/storage_smb_config.go @@ -20,6 +20,9 @@ type StorageSmbConfig struct { // Whether the server is configured to be case-insensitive. CaseInsensitive *bool `json:"caseInsensitive,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Domain name for NTLM authentication. Domain *string `json:"domain,omitempty"` diff --git a/client/swagger/models/storage_storj_existing_config.go b/client/swagger/models/storage_storj_existing_config.go index 0cbfa56b..fb124340 100644 --- a/client/swagger/models/storage_storj_existing_config.go +++ b/client/swagger/models/storage_storj_existing_config.go @@ -19,6 +19,9 @@ type StorageStorjExistingConfig struct { // Access grant. AccessGrant string `json:"accessGrant,omitempty"` + + // Description of the remote. + Description string `json:"description,omitempty"` } // Validate validates this storage storj existing config diff --git a/client/swagger/models/storage_storj_new_config.go b/client/swagger/models/storage_storj_new_config.go index b4c94881..a1acc140 100644 --- a/client/swagger/models/storage_storj_new_config.go +++ b/client/swagger/models/storage_storj_new_config.go @@ -20,6 +20,9 @@ type StorageStorjNewConfig struct { // API key. APIKey string `json:"apiKey,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Encryption passphrase. Passphrase string `json:"passphrase,omitempty"` diff --git a/client/swagger/models/storage_sugarsync_config.go b/client/swagger/models/storage_sugarsync_config.go index a429934b..6da4bee2 100644 --- a/client/swagger/models/storage_sugarsync_config.go +++ b/client/swagger/models/storage_sugarsync_config.go @@ -32,6 +32,9 @@ type StorageSugarsyncConfig struct { // Sugarsync deleted folder id. DeletedID string `json:"deletedId,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. 
Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/storage_swift_config.go b/client/swagger/models/storage_swift_config.go index 0c4e290c..00ab9ac1 100644 --- a/client/swagger/models/storage_swift_config.go +++ b/client/swagger/models/storage_swift_config.go @@ -36,9 +36,12 @@ type StorageSwiftConfig struct { // AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION). AuthVersion int64 `json:"authVersion,omitempty"` - // Above this size files will be chunked into a _segments container. + // Above this size files will be chunked. ChunkSize *string `json:"chunkSize,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) Domain string `json:"domain,omitempty"` @@ -53,6 +56,9 @@ type StorageSwiftConfig struct { // Example: false EnvAuth *bool `json:"envAuth,omitempty"` + // When paginating, always fetch unless we received an empty page. + FetchUntilEmptyPage *bool `json:"fetchUntilEmptyPage,omitempty"` + // API key or password (OS_PASSWORD). Key string `json:"key,omitempty"` @@ -65,6 +71,9 @@ type StorageSwiftConfig struct { // Disable support for static and dynamic large objects NoLargeObjects *bool `json:"noLargeObjects,omitempty"` + // When paginating, fetch if the current page is within this percentage of the limit. + PartialPageFetchThreshold int64 `json:"partialPageFetchThreshold,omitempty"` + // Region name - optional (OS_REGION_NAME). Region string `json:"region,omitempty"` @@ -83,6 +92,9 @@ type StorageSwiftConfig struct { // Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID). TenantID string `json:"tenantId,omitempty"` + // Choose destination for large object segments + UseSegmentsContainer *string `json:"useSegmentsContainer,omitempty"` + // User name to log in (OS_USERNAME). User string `json:"user,omitempty"` diff --git a/client/swagger/models/storage_union_config.go b/client/swagger/models/storage_union_config.go index 2e3c12bd..a2e46efc 100644 --- a/client/swagger/models/storage_union_config.go +++ b/client/swagger/models/storage_union_config.go @@ -26,6 +26,9 @@ type StorageUnionConfig struct { // Policy to choose upstream on CREATE category. CreatePolicy *string `json:"createPolicy,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // Minimum viable free space for lfs/eplfs policies. MinFreeSpace *string `json:"minFreeSpace,omitempty"` diff --git a/client/swagger/models/storage_uptobox_config.go b/client/swagger/models/storage_uptobox_config.go index 4432159a..8bab7e89 100644 --- a/client/swagger/models/storage_uptobox_config.go +++ b/client/swagger/models/storage_uptobox_config.go @@ -20,8 +20,14 @@ type StorageUptoboxConfig struct { // Your access token. AccessToken string `json:"accessToken,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` + + // Set to make uploaded files private + Private *bool `json:"private,omitempty"` } // Validate validates this storage uptobox config diff --git a/client/swagger/models/storage_webdav_config.go b/client/swagger/models/storage_webdav_config.go index 0ee1030c..b6c3d1df 100644 --- a/client/swagger/models/storage_webdav_config.go +++ b/client/swagger/models/storage_webdav_config.go @@ -23,15 +23,33 @@ type StorageWebdavConfig struct { // Command to run to get a bearer token. 
BearerTokenCommand string `json:"bearerTokenCommand,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding string `json:"encoding,omitempty"` // Set HTTP headers for all transactions. Headers string `json:"headers,omitempty"` + // Nextcloud upload chunk size. + NextcloudChunkSize *string `json:"nextcloudChunkSize,omitempty"` + + // Exclude ownCloud mounted storages + OwncloudExcludeMounts *bool `json:"owncloudExcludeMounts,omitempty"` + + // Exclude ownCloud shares + OwncloudExcludeShares *bool `json:"owncloudExcludeShares,omitempty"` + + // Minimum time to sleep between API calls. + PacerMinSleep *string `json:"pacerMinSleep,omitempty"` + // Password. Pass string `json:"pass,omitempty"` + // Path to a unix domain socket to dial to, instead of opening a TCP connection directly + UnixSocket string `json:"unixSocket,omitempty"` + // URL of http host to connect to. URL string `json:"url,omitempty"` @@ -39,7 +57,7 @@ type StorageWebdavConfig struct { User string `json:"user,omitempty"` // Name of the WebDAV site/service/software you are using. - // Example: nextcloud + // Example: fastmail Vendor string `json:"vendor,omitempty"` } diff --git a/client/swagger/models/storage_yandex_config.go b/client/swagger/models/storage_yandex_config.go index 46610eba..bc5eadba 100644 --- a/client/swagger/models/storage_yandex_config.go +++ b/client/swagger/models/storage_yandex_config.go @@ -26,12 +26,18 @@ type StorageYandexConfig struct { // OAuth Client Secret. ClientSecret string `json:"clientSecret,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. Encoding *string `json:"encoding,omitempty"` // Delete files permanently rather than putting them into the trash. HardDelete *bool `json:"hardDelete,omitempty"` + // Set the user agent to match an official version of the yandex disk client. May help with upload performance. + SpoofUa *bool `json:"spoofUa,omitempty"` + // OAuth Access Token as a JSON blob. Token string `json:"token,omitempty"` diff --git a/client/swagger/models/storage_zoho_config.go b/client/swagger/models/storage_zoho_config.go index 3443411b..a76637cd 100644 --- a/client/swagger/models/storage_zoho_config.go +++ b/client/swagger/models/storage_zoho_config.go @@ -26,6 +26,9 @@ type StorageZohoConfig struct { // OAuth Client Secret. ClientSecret string `json:"clientSecret,omitempty"` + // Description of the remote. + Description string `json:"description,omitempty"` + // The encoding for the backend. 
Encoding *string `json:"encoding,omitempty"` diff --git a/client/swagger/models/store_piece_reader.go b/client/swagger/models/store_piece_reader.go index 12ebc0f6..e582d899 100644 --- a/client/swagger/models/store_piece_reader.go +++ b/client/swagger/models/store_piece_reader.go @@ -8,4 +8,4 @@ package models // StorePieceReader store piece reader // // swagger:model store.PieceReader -type StorePieceReader interface{} +type StorePieceReader any diff --git a/docs/en/SUMMARY.md b/docs/en/SUMMARY.md index f1b32fcc..cd214212 100644 --- a/docs/en/SUMMARY.md +++ b/docs/en/SUMMARY.md @@ -40,8 +40,6 @@ * [Admin](cli-reference/admin/README.md) * [Init](cli-reference/admin/init.md) * [Reset](cli-reference/admin/reset.md) - * [Migrate Dataset](cli-reference/admin/migrate-dataset.md) - * [Migrate Schedule](cli-reference/admin/migrate-schedule.md) * [Download](cli-reference/download.md) * [Extract Car](cli-reference/extract-car.md) * [Deal](cli-reference/deal/README.md) @@ -67,7 +65,6 @@ * [Remove](cli-reference/wallet/remove.md) * [Storage](cli-reference/storage/README.md) * [Create](cli-reference/storage/create/README.md) - * [Acd](cli-reference/storage/create/acd.md) * [Azureblob](cli-reference/storage/create/azureblob.md) * [B2](cli-reference/storage/create/b2.md) * [Box](cli-reference/storage/create/box.md) @@ -98,6 +95,7 @@ * [No_auth](cli-reference/storage/create/oos/no_auth.md) * [Resource_principal_auth](cli-reference/storage/create/oos/resource_principal_auth.md) * [User_principal_auth](cli-reference/storage/create/oos/user_principal_auth.md) + * [Workload_identity_auth](cli-reference/storage/create/oos/workload_identity_auth.md) * [Opendrive](cli-reference/storage/create/opendrive.md) * [Pcloud](cli-reference/storage/create/pcloud.md) * [Premiumizeme](cli-reference/storage/create/premiumizeme.md) @@ -112,21 +110,28 @@ * [Cloudflare](cli-reference/storage/create/s3/cloudflare.md) * [Digitalocean](cli-reference/storage/create/s3/digitalocean.md) * [Dreamhost](cli-reference/storage/create/s3/dreamhost.md) + * [Google Cloud Storage](cli-reference/storage/create/s3/gcs.md) * [Huaweiobs](cli-reference/storage/create/s3/huaweiobs.md) * [Ibmcos](cli-reference/storage/create/s3/ibmcos.md) * [Idrive](cli-reference/storage/create/s3/idrive.md) * [Ionos](cli-reference/storage/create/s3/ionos.md) + * [Leviia](cli-reference/storage/create/s3/leviia.md) * [Liara](cli-reference/storage/create/s3/liara.md) + * [Linode](cli-reference/storage/create/s3/linode.md) * [Lyvecloud](cli-reference/storage/create/s3/lyvecloud.md) + * [Magalu](cli-reference/storage/create/s3/magalu.md) * [Minio](cli-reference/storage/create/s3/minio.md) * [Netease](cli-reference/storage/create/s3/netease.md) * [Other](cli-reference/storage/create/s3/other.md) + * [Petabox](cli-reference/storage/create/s3/petabox.md) * [Qiniu](cli-reference/storage/create/s3/qiniu.md) * [Rackcorp](cli-reference/storage/create/s3/rackcorp.md) + * [Rclone](cli-reference/storage/create/s3/rclone.md) * [Scaleway](cli-reference/storage/create/s3/scaleway.md) * [Seaweedfs](cli-reference/storage/create/s3/seaweedfs.md) * [Stackpath](cli-reference/storage/create/s3/stackpath.md) * [Storj](cli-reference/storage/create/s3/storj.md) + * [Synology](cli-reference/storage/create/s3/synology.md) * [Tencentcos](cli-reference/storage/create/s3/tencentcos.md) * [Wasabi](cli-reference/storage/create/s3/wasabi.md) * [Seafile](cli-reference/storage/create/seafile.md) @@ -148,7 +153,6 @@ * [List](cli-reference/storage/list.md) * [Remove](cli-reference/storage/remove.md) * 
[Update](cli-reference/storage/update/README.md) - * [Acd](cli-reference/storage/update/acd.md) * [Azureblob](cli-reference/storage/update/azureblob.md) * [B2](cli-reference/storage/update/b2.md) * [Box](cli-reference/storage/update/box.md) @@ -179,6 +183,7 @@ * [No_auth](cli-reference/storage/update/oos/no_auth.md) * [Resource_principal_auth](cli-reference/storage/update/oos/resource_principal_auth.md) * [User_principal_auth](cli-reference/storage/update/oos/user_principal_auth.md) + * [Workload_identity_auth](cli-reference/storage/update/oos/workload_identity_auth.md) * [Opendrive](cli-reference/storage/update/opendrive.md) * [Pcloud](cli-reference/storage/update/pcloud.md) * [Premiumizeme](cli-reference/storage/update/premiumizeme.md) @@ -193,21 +198,28 @@ * [Cloudflare](cli-reference/storage/update/s3/cloudflare.md) * [Digitalocean](cli-reference/storage/update/s3/digitalocean.md) * [Dreamhost](cli-reference/storage/update/s3/dreamhost.md) + * [Google Cloud Storage](cli-reference/storage/update/s3/gcs.md) * [Huaweiobs](cli-reference/storage/update/s3/huaweiobs.md) * [Ibmcos](cli-reference/storage/update/s3/ibmcos.md) * [Idrive](cli-reference/storage/update/s3/idrive.md) * [Ionos](cli-reference/storage/update/s3/ionos.md) + * [Leviia](cli-reference/storage/update/s3/leviia.md) * [Liara](cli-reference/storage/update/s3/liara.md) + * [Linode](cli-reference/storage/update/s3/linode.md) * [Lyvecloud](cli-reference/storage/update/s3/lyvecloud.md) + * [Magalu](cli-reference/storage/update/s3/magalu.md) * [Minio](cli-reference/storage/update/s3/minio.md) * [Netease](cli-reference/storage/update/s3/netease.md) * [Other](cli-reference/storage/update/s3/other.md) + * [Petabox](cli-reference/storage/update/s3/petabox.md) * [Qiniu](cli-reference/storage/update/s3/qiniu.md) * [Rackcorp](cli-reference/storage/update/s3/rackcorp.md) + * [Rclone](cli-reference/storage/update/s3/rclone.md) * [Scaleway](cli-reference/storage/update/s3/scaleway.md) * [Seaweedfs](cli-reference/storage/update/s3/seaweedfs.md) * [Stackpath](cli-reference/storage/update/s3/stackpath.md) * [Storj](cli-reference/storage/update/s3/storj.md) + * [Synology](cli-reference/storage/update/s3/synology.md) * [Tencentcos](cli-reference/storage/update/s3/tencentcos.md) * [Wasabi](cli-reference/storage/update/s3/wasabi.md) * [Seafile](cli-reference/storage/update/seafile.md) @@ -242,6 +254,7 @@ * [Pause Daggen](cli-reference/prep/pause-daggen.md) * [List Pieces](cli-reference/prep/list-pieces.md) * [Add Piece](cli-reference/prep/add-piece.md) + * [Delete Piece](cli-reference/prep/delete-piece.md) * [Explore](cli-reference/prep/explore.md) * [Attach Wallet](cli-reference/prep/attach-wallet.md) * [List Wallets](cli-reference/prep/list-wallets.md) diff --git a/docs/en/cli-reference/admin/README.md b/docs/en/cli-reference/admin/README.md index 0e654c78..1c0397e2 100644 --- a/docs/en/cli-reference/admin/README.md +++ b/docs/en/cli-reference/admin/README.md @@ -9,9 +9,9 @@ USAGE: singularity admin command [command options] COMMANDS: - init Initialize or upgrade the database - reset Reset the database - help, h Shows a list of commands or help for one command + init Initialize or upgrade the database + reset Reset the database + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help diff --git a/docs/en/cli-reference/download.md b/docs/en/cli-reference/download.md index e8e710a0..5f049623 100644 --- a/docs/en/cli-reference/download.md +++ b/docs/en/cli-reference/download.md @@ -22,13 +22,7 @@ OPTIONS: 
--netstorage-secret value Set the NetStorage account secret/G2O key for authentication. [$NETSTORAGE_SECRET] - Amazon Drive - - --acd-client-secret value OAuth Client Secret. [$ACD_CLIENT_SECRET] - --acd-token value OAuth Access Token as a JSON blob. [$ACD_TOKEN] - --acd-token-url value Token server url. [$ACD_TOKEN_URL] - - Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi + Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others --s3-access-key-id value AWS Access Key ID. [$S3_ACCESS_KEY_ID] --s3-secret-access-key value AWS Secret Access Key (password). [$S3_SECRET_ACCESS_KEY] @@ -49,6 +43,12 @@ OPTIONS: --box-token value OAuth Access Token as a JSON blob. [$BOX_TOKEN] --box-token-url value Token server url. [$BOX_TOKEN_URL] + Citrix Sharefile + + --sharefile-client-secret value OAuth Client Secret. [$SHAREFILE_CLIENT_SECRET] + --sharefile-token value OAuth Access Token as a JSON blob. [$SHAREFILE_TOKEN] + --sharefile-token-url value Token server url. [$SHAREFILE_TOKEN_URL] + Client Config --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. @@ -62,7 +62,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Dropbox @@ -83,10 +83,10 @@ OPTIONS: General Config - --api value URL of the metadata API (default: "http://127.0.0.1:7777") - --concurrency value Number of concurrent downloads (default: 10) - --out-dir value Directory to write CAR files to (default: ".") - --quiet Suppress all output (default: false) + --api value, --metadata-api value URL of the metadata API (default: "http://127.0.0.1:7777") + --concurrency value Number of concurrent downloads (default: 10) + --out-dir value Directory to write CAR files to (default: ".") + --quiet Suppress all output (default: false) Google Cloud Storage (this is not Google Drive) @@ -118,13 +118,22 @@ OPTIONS: --internetarchive-access-key-id value IAS3 Access Key. [$INTERNETARCHIVE_ACCESS_KEY_ID] --internetarchive-secret-access-key value IAS3 Secret Key (password). [$INTERNETARCHIVE_SECRET_ACCESS_KEY] + Jottacloud + + --jottacloud-client-secret value OAuth Client Secret. [$JOTTACLOUD_CLIENT_SECRET] + --jottacloud-token value OAuth Access Token as a JSON blob. [$JOTTACLOUD_TOKEN] + --jottacloud-token-url value Token server url. [$JOTTACLOUD_TOKEN_URL] + Koofr, Digi Storage and other Koofr-compatible storage providers - --koofr-password value Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password). 
[$KOOFR_PASSWORD] + --koofr-password value Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password. [$KOOFR_PASSWORD] Mail.ru Cloud - --mailru-pass value Password. [$MAILRU_PASS] + --mailru-client-secret value OAuth Client Secret. [$MAILRU_CLIENT_SECRET] + --mailru-pass value Password. [$MAILRU_PASS] + --mailru-token value OAuth Access Token as a JSON blob. [$MAILRU_TOKEN] + --mailru-token-url value Token server url. [$MAILRU_TOKEN_URL] Mega @@ -148,7 +157,7 @@ OPTIONS: --opendrive-password value Password. [$OPENDRIVE_PASSWORD] - OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) + OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) --swift-application-credential-secret value Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET). [$SWIFT_APPLICATION_CREDENTIAL_SECRET] --swift-auth-token value Auth Token from alternate authentication - optional (OS_AUTH_TOKEN). [$SWIFT_AUTH_TOKEN] @@ -159,7 +168,7 @@ OPTIONS: --oos-sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$OOS_SSE_CUSTOMER_KEY] --oos-sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$OOS_SSE_CUSTOMER_KEY_FILE] --oos-sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$OOS_SSE_CUSTOMER_KEY_SHA256] - --oos-sse-kms-key-id value if using using your own master key in vault, this header specifies the [$OOS_SSE_KMS_KEY_ID] + --oos-sse-kms-key-id value if using your own master key in vault, this header specifies the [$OOS_SSE_KMS_KEY_ID] Pcloud @@ -168,6 +177,12 @@ OPTIONS: --pcloud-token value OAuth Access Token as a JSON blob. [$PCLOUD_TOKEN] --pcloud-token-url value Token server url. [$PCLOUD_TOKEN_URL] + Put.io + + --putio-client-secret value OAuth Client Secret. [$PUTIO_CLIENT_SECRET] + --putio-token value OAuth Access Token as a JSON blob. [$PUTIO_TOKEN] + --putio-token-url value Token server url. [$PUTIO_TOKEN_URL] + QingCloud Object Storage --qingstor-access-key-id value QingStor Access Key ID. [$QINGSTOR_ACCESS_KEY_ID] @@ -188,14 +203,15 @@ OPTIONS: SSH/SFTP - --sftp-ask-password Allow asking for SFTP password when needed. (default: false) [$SFTP_ASK_PASSWORD] - --sftp-key-exchange value Space separated list of key exchange algorithms, ordered by preference. [$SFTP_KEY_EXCHANGE] - --sftp-key-file value Path to PEM-encoded private key file. [$SFTP_KEY_FILE] - --sftp-key-file-pass value The passphrase to decrypt the PEM-encoded private key file. [$SFTP_KEY_FILE_PASS] - --sftp-key-pem value Raw PEM-encoded private key. [$SFTP_KEY_PEM] - --sftp-key-use-agent When set forces the usage of the ssh-agent. (default: false) [$SFTP_KEY_USE_AGENT] - --sftp-pass value SSH password, leave blank to use ssh-agent. [$SFTP_PASS] - --sftp-pubkey-file value Optional path to public key file. [$SFTP_PUBKEY_FILE] + --sftp-ask-password Allow asking for SFTP password when needed. (default: false) [$SFTP_ASK_PASSWORD] + --sftp-host-key-algorithms value Space separated list of host key algorithms, ordered by preference. [$SFTP_HOST_KEY_ALGORITHMS] + --sftp-key-exchange value Space separated list of key exchange algorithms, ordered by preference. [$SFTP_KEY_EXCHANGE] + --sftp-key-file value Path to PEM-encoded private key file. [$SFTP_KEY_FILE] + --sftp-key-file-pass value The passphrase to decrypt the PEM-encoded private key file. 
[$SFTP_KEY_FILE_PASS] + --sftp-key-pem value Raw PEM-encoded private key. [$SFTP_KEY_PEM] + --sftp-key-use-agent When set forces the usage of the ssh-agent. (default: false) [$SFTP_KEY_USE_AGENT] + --sftp-pass value SSH password, leave blank to use ssh-agent. [$SFTP_PASS] + --sftp-pubkey-file value Optional path to public key file. [$SFTP_PUBKEY_FILE] Sia Decentralized Cloud @@ -236,7 +252,10 @@ OPTIONS: premiumize.me - --premiumizeme-api-key value API Key. [$PREMIUMIZEME_API_KEY] + --premiumizeme-api-key value API Key. [$PREMIUMIZEME_API_KEY] + --premiumizeme-client-secret value OAuth Client Secret. [$PREMIUMIZEME_CLIENT_SECRET] + --premiumizeme-token value OAuth Access Token as a JSON blob. [$PREMIUMIZEME_TOKEN] + --premiumizeme-token-url value Token server url. [$PREMIUMIZEME_TOKEN_URL] seafile diff --git a/docs/en/cli-reference/prep/README.md b/docs/en/cli-reference/prep/README.md index 08338bc3..824fcc22 100644 --- a/docs/en/cli-reference/prep/README.md +++ b/docs/en/cli-reference/prep/README.md @@ -21,8 +21,9 @@ COMMANDS: start-daggen Start a DAG generation that creates a snapshot of all folder structures pause-daggen Pause a DAG generation job Piece Management: - list-pieces List all generated pieces for a preparation - add-piece Manually add piece info to a preparation. This is useful for pieces prepared by external tools. + list-pieces List all generated pieces for a preparation + add-piece Manually add piece info to a preparation. This is useful for pieces prepared by external tools. + delete-piece Delete a piece from a preparation Preparation Management: create Create a new preparation list List all preparations diff --git a/docs/en/cli-reference/prep/delete-piece.md b/docs/en/cli-reference/prep/delete-piece.md new file mode 100644 index 00000000..35e19949 --- /dev/null +++ b/docs/en/cli-reference/prep/delete-piece.md @@ -0,0 +1,19 @@ +# Delete a piece from a preparation + +{% code fullWidth="true" %} +``` +NAME: + singularity prep delete-piece - Delete a piece from a preparation + +USAGE: + singularity prep delete-piece [command options] + +CATEGORY: + Piece Management + +OPTIONS: + --delete-car Delete the physical CAR file from storage (default: true) + --force Delete even if deals reference this piece (default: false) + --help, -h show help +``` +{% endcode %} diff --git a/docs/en/cli-reference/run/download-server.md b/docs/en/cli-reference/run/download-server.md index 74a4e6ec..c5772831 100644 --- a/docs/en/cli-reference/run/download-server.md +++ b/docs/en/cli-reference/run/download-server.md @@ -25,13 +25,7 @@ OPTIONS: --netstorage-secret value Set the NetStorage account secret/G2O key for authentication. [$NETSTORAGE_SECRET] - Amazon Drive - - --acd-client-secret value OAuth Client Secret. [$ACD_CLIENT_SECRET] - --acd-token value OAuth Access Token as a JSON blob. [$ACD_TOKEN] - --acd-token-url value Token server url. 
[$ACD_TOKEN_URL] - - Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi + Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others --s3-access-key-id value AWS Access Key ID. [$S3_ACCESS_KEY_ID] --s3-secret-access-key value AWS Secret Access Key (password). [$S3_SECRET_ACCESS_KEY] @@ -52,6 +46,12 @@ OPTIONS: --box-token value OAuth Access Token as a JSON blob. [$BOX_TOKEN] --box-token-url value Token server url. [$BOX_TOKEN_URL] + Citrix Sharefile + + --sharefile-client-secret value OAuth Client Secret. [$SHAREFILE_CLIENT_SECRET] + --sharefile-token value OAuth Access Token as a JSON blob. [$SHAREFILE_TOKEN] + --sharefile-token-url value Token server url. [$SHAREFILE_TOKEN_URL] + Client Config --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. @@ -65,7 +65,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Dropbox @@ -86,8 +86,8 @@ OPTIONS: General Config - --bind value Address to bind the HTTP server to (default: "127.0.0.1:8888") - --metadata-api value URL of the metadata API (default: "http://127.0.0.1:7777") + --bind value Address to bind the HTTP server to (default: "127.0.0.1:8888") + --metadata-api value, --api value URL of the metadata API (default: "http://127.0.0.1:7777") Google Cloud Storage (this is not Google Drive) @@ -119,13 +119,22 @@ OPTIONS: --internetarchive-access-key-id value IAS3 Access Key. [$INTERNETARCHIVE_ACCESS_KEY_ID] --internetarchive-secret-access-key value IAS3 Secret Key (password). [$INTERNETARCHIVE_SECRET_ACCESS_KEY] + Jottacloud + + --jottacloud-client-secret value OAuth Client Secret. [$JOTTACLOUD_CLIENT_SECRET] + --jottacloud-token value OAuth Access Token as a JSON blob. [$JOTTACLOUD_TOKEN] + --jottacloud-token-url value Token server url. [$JOTTACLOUD_TOKEN_URL] + Koofr, Digi Storage and other Koofr-compatible storage providers - --koofr-password value Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password). [$KOOFR_PASSWORD] + --koofr-password value Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password. [$KOOFR_PASSWORD] Mail.ru Cloud - --mailru-pass value Password. [$MAILRU_PASS] + --mailru-client-secret value OAuth Client Secret. [$MAILRU_CLIENT_SECRET] + --mailru-pass value Password. [$MAILRU_PASS] + --mailru-token value OAuth Access Token as a JSON blob. [$MAILRU_TOKEN] + --mailru-token-url value Token server url. [$MAILRU_TOKEN_URL] Mega @@ -149,7 +158,7 @@ OPTIONS: --opendrive-password value Password. 
[$OPENDRIVE_PASSWORD] - OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) + OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) --swift-application-credential-secret value Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET). [$SWIFT_APPLICATION_CREDENTIAL_SECRET] --swift-auth-token value Auth Token from alternate authentication - optional (OS_AUTH_TOKEN). [$SWIFT_AUTH_TOKEN] @@ -160,7 +169,7 @@ OPTIONS: --oos-sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$OOS_SSE_CUSTOMER_KEY] --oos-sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$OOS_SSE_CUSTOMER_KEY_FILE] --oos-sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$OOS_SSE_CUSTOMER_KEY_SHA256] - --oos-sse-kms-key-id value if using using your own master key in vault, this header specifies the [$OOS_SSE_KMS_KEY_ID] + --oos-sse-kms-key-id value if using your own master key in vault, this header specifies the [$OOS_SSE_KMS_KEY_ID] Pcloud @@ -169,6 +178,12 @@ OPTIONS: --pcloud-token value OAuth Access Token as a JSON blob. [$PCLOUD_TOKEN] --pcloud-token-url value Token server url. [$PCLOUD_TOKEN_URL] + Put.io + + --putio-client-secret value OAuth Client Secret. [$PUTIO_CLIENT_SECRET] + --putio-token value OAuth Access Token as a JSON blob. [$PUTIO_TOKEN] + --putio-token-url value Token server url. [$PUTIO_TOKEN_URL] + QingCloud Object Storage --qingstor-access-key-id value QingStor Access Key ID. [$QINGSTOR_ACCESS_KEY_ID] @@ -189,14 +204,15 @@ OPTIONS: SSH/SFTP - --sftp-ask-password Allow asking for SFTP password when needed. (default: false) [$SFTP_ASK_PASSWORD] - --sftp-key-exchange value Space separated list of key exchange algorithms, ordered by preference. [$SFTP_KEY_EXCHANGE] - --sftp-key-file value Path to PEM-encoded private key file. [$SFTP_KEY_FILE] - --sftp-key-file-pass value The passphrase to decrypt the PEM-encoded private key file. [$SFTP_KEY_FILE_PASS] - --sftp-key-pem value Raw PEM-encoded private key. [$SFTP_KEY_PEM] - --sftp-key-use-agent When set forces the usage of the ssh-agent. (default: false) [$SFTP_KEY_USE_AGENT] - --sftp-pass value SSH password, leave blank to use ssh-agent. [$SFTP_PASS] - --sftp-pubkey-file value Optional path to public key file. [$SFTP_PUBKEY_FILE] + --sftp-ask-password Allow asking for SFTP password when needed. (default: false) [$SFTP_ASK_PASSWORD] + --sftp-host-key-algorithms value Space separated list of host key algorithms, ordered by preference. [$SFTP_HOST_KEY_ALGORITHMS] + --sftp-key-exchange value Space separated list of key exchange algorithms, ordered by preference. [$SFTP_KEY_EXCHANGE] + --sftp-key-file value Path to PEM-encoded private key file. [$SFTP_KEY_FILE] + --sftp-key-file-pass value The passphrase to decrypt the PEM-encoded private key file. [$SFTP_KEY_FILE_PASS] + --sftp-key-pem value Raw PEM-encoded private key. [$SFTP_KEY_PEM] + --sftp-key-use-agent When set forces the usage of the ssh-agent. (default: false) [$SFTP_KEY_USE_AGENT] + --sftp-pass value SSH password, leave blank to use ssh-agent. [$SFTP_PASS] + --sftp-pubkey-file value Optional path to public key file. [$SFTP_PUBKEY_FILE] Sia Decentralized Cloud @@ -237,7 +253,10 @@ OPTIONS: premiumize.me - --premiumizeme-api-key value API Key. [$PREMIUMIZEME_API_KEY] + --premiumizeme-api-key value API Key. 
[$PREMIUMIZEME_API_KEY] + --premiumizeme-client-secret value OAuth Client Secret. [$PREMIUMIZEME_CLIENT_SECRET] + --premiumizeme-token value OAuth Access Token as a JSON blob. [$PREMIUMIZEME_TOKEN] + --premiumizeme-token-url value Token server url. [$PREMIUMIZEME_TOKEN_URL] seafile diff --git a/docs/en/cli-reference/storage/create/README.md b/docs/en/cli-reference/storage/create/README.md index c672c2ae..2d6bbca5 100644 --- a/docs/en/cli-reference/storage/create/README.md +++ b/docs/en/cli-reference/storage/create/README.md @@ -9,7 +9,6 @@ USAGE: singularity storage create command [command options] COMMANDS: - acd Amazon Drive azureblob Microsoft Azure Blob Storage b2 Backblaze B2 box Box @@ -37,7 +36,7 @@ COMMANDS: premiumizeme premiumize.me putio Put.io qingstor QingCloud Object Storage - s3 Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi + s3 Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others seafile seafile sftp SSH/SFTP sharefile Citrix Sharefile @@ -45,7 +44,7 @@ COMMANDS: smb SMB / CIFS storj Storj Decentralized Cloud Storage sugarsync Sugarsync - swift OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) + swift OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) union Union merges the contents of several upstream fs uptobox Uptobox webdav WebDAV diff --git a/docs/en/cli-reference/storage/create/acd.md b/docs/en/cli-reference/storage/create/acd.md deleted file mode 100644 index 42442e5f..00000000 --- a/docs/en/cli-reference/storage/create/acd.md +++ /dev/null @@ -1,124 +0,0 @@ -# Amazon Drive - -{% code fullWidth="true" %} -``` -NAME: - singularity storage create acd - Amazon Drive - -USAGE: - singularity storage create acd [command options] - -DESCRIPTION: - --client-id - OAuth Client Id. - - Leave blank normally. - - --client-secret - OAuth Client Secret. - - Leave blank normally. - - --token - OAuth Access Token as a JSON blob. - - --auth-url - Auth server URL. - - Leave blank to use the provider defaults. - - --token-url - Token server url. - - Leave blank to use the provider defaults. - - --checkpoint - Checkpoint for internal polling (debug). - - --upload-wait-per-gb - Additional time per GiB to wait after a failed complete upload to see if it appears. - - Sometimes Amazon Drive gives an error when a file has been fully - uploaded but the file appears anyway after a little while. This - happens sometimes for files over 1 GiB in size and nearly every time for - files bigger than 10 GiB. This parameter controls the time rclone waits - for the file to appear. - - The default value for this parameter is 3 minutes per GiB, so by - default it will wait 3 minutes for every GiB uploaded to see if the - file appears. - - You can disable this feature by setting it to 0. This may cause - conflict errors as rclone retries the failed upload but the file will - most likely appear correctly eventually. - - These values were determined empirically by observing lots of uploads - of big files for a range of file sizes. 
- - Upload with the "-v" flag to see more info about what rclone is doing - in this situation. - - --templink-threshold - Files >= this size will be downloaded via their tempLink. - - Files this size or more will be downloaded via their "tempLink". This - is to work around a problem with Amazon Drive which blocks downloads - of files bigger than about 10 GiB. The default for this is 9 GiB which - shouldn't need to be changed. - - To download files above this threshold, rclone requests a "tempLink" - which downloads the file through a temporary URL directly from the - underlying S3 storage. - - --encoding - The encoding for the backend. - - See the [encoding section in the overview](/overview/#encoding) for more info. - - -OPTIONS: - --client-id value OAuth Client Id. [$CLIENT_ID] - --client-secret value OAuth Client Secret. [$CLIENT_SECRET] - --help, -h show help - - Advanced - - --auth-url value Auth server URL. [$AUTH_URL] - --checkpoint value Checkpoint for internal polling (debug). [$CHECKPOINT] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --templink-threshold value Files >= this size will be downloaded via their tempLink. (default: "9Gi") [$TEMPLINK_THRESHOLD] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] - --upload-wait-per-gb value Additional time per GiB to wait after a failed complete upload to see if it appears. (default: "3m0s") [$UPLOAD_WAIT_PER_GB] - - Client Config - - --client-ca-cert value Path to CA certificate used to verify servers - --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth - --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) - --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) - --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) - --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) - --client-key value Path to Client SSL private key (PEM) for mutual TLS auth - --client-no-gzip Don't set Accept-Encoding: gzip (default: false) - --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) - --client-timeout value IO idle timeout (default: 5m0s) - --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) - - General - - --name value Name of the storage (default: Auto generated) - --path value Path of the storage - - Retry Strategy - - --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) - --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) - --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) - --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) - --client-retry-max value Max number of retries for IO read errors (default: 10) - --client-skip-inaccessible Skip inaccessible files when opening (default: false) - -``` -{% endcode %} diff --git a/docs/en/cli-reference/storage/create/azureblob.md b/docs/en/cli-reference/storage/create/azureblob.md index 75b37a8f..36b66e55 100644 --- a/docs/en/cli-reference/storage/create/azureblob.md +++ b/docs/en/cli-reference/storage/create/azureblob.md @@ -195,10 +195,10 @@ DESCRIPTION: avoid the time out. --access-tier - Access tier of blob: hot, cool or archive. + Access tier of blob: hot, cool, cold or archive. - Archived blobs can be restored by setting access tier to hot or - cool. Leave blank if you intend to use default access tier, which is + Archived blobs can be restored by setting access tier to hot, cool or + cold. Leave blank if you intend to use default access tier, which is set at account level If there is no "access tier" specified, rclone doesn't apply any tier. @@ -206,7 +206,7 @@ DESCRIPTION: are not modified, specifying "access tier" to new one will have no effect. If blobs are in "archive tier" at remote, trying to perform data transfer operations from remote will not be allowed. User should first restore by - tiering blob to "Hot" or "Cool". + tiering blob to "Hot", "Cool" or "Cold". --archive-tier-delete Delete archive tier blobs before overwriting. @@ -233,13 +233,10 @@ DESCRIPTION: to start uploading. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --encoding The encoding for the backend. @@ -255,6 +252,16 @@ DESCRIPTION: | blob | Blob data within this container can be read via anonymous request. | container | Allow full public read access for container and blob data. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option + creates an empty object ending with "/", to persist the folder. 
+ + This object also has the metadata "hdi_isfolder = true" to conform to + the Microsoft standard. + + --no-check-container If set, don't attempt to check the container exists or create it. @@ -265,6 +272,17 @@ DESCRIPTION: --no-head-object If set, do not do HEAD before GET when getting objects. + --delete-snapshots + Set to specify how to deal with snapshots on blob deletion. + + Examples: + | | By default, the delete operation fails if a blob has snapshots + | include | Specify 'include' to remove the root blob and all its snapshots + | only | Specify 'only' to remove only the snapshots but keep the root blob. + + --description + Description of the remote. + OPTIONS: --account value Azure Storage Account Name. [$ACCOUNT] @@ -280,16 +298,19 @@ OPTIONS: Advanced - --access-tier value Access tier of blob: hot, cool or archive. [$ACCESS_TIER] + --access-tier value Access tier of blob: hot, cool, cold or archive. [$ACCESS_TIER] --archive-tier-delete Delete archive tier blobs before overwriting. (default: false) [$ARCHIVE_TIER_DELETE] --chunk-size value Upload chunk size. (default: "4Mi") [$CHUNK_SIZE] --client-send-certificate-chain Send the certificate chain when using certificate auth. (default: false) [$CLIENT_SEND_CERTIFICATE_CHAIN] + --delete-snapshots value Set to specify how to deal with snapshots on blob deletion. [$DELETE_SNAPSHOTS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8") [$ENCODING] --endpoint value Endpoint for the service. [$ENDPOINT] --list-chunk value Size of blob list. (default: 5000) [$LIST_CHUNK] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] --msi-client-id value Object ID of the user-assigned MSI to use, if any. [$MSI_CLIENT_ID] --msi-mi-res-id value Azure resource ID of the user-assigned MSI to use, if any. [$MSI_MI_RES_ID] --msi-object-id value Object ID of the user-assigned MSI to use, if any. 
[$MSI_OBJECT_ID] @@ -317,7 +338,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/b2.md b/docs/en/cli-reference/storage/create/b2.md index 6d0be236..4adb9378 100644 --- a/docs/en/cli-reference/storage/create/b2.md +++ b/docs/en/cli-reference/storage/create/b2.md @@ -31,7 +31,7 @@ DESCRIPTION: * "force_cap_exceeded" These will be set in the "X-Bz-Test-Mode" header which is documented - in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html). + in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist). --versions Include old versions in directory listings. @@ -73,6 +73,16 @@ DESCRIPTION: 5,000,000 Bytes is the minimum size. + --upload-concurrency + Concurrency for multipart uploads. + + This is the number of chunks of the same file that are uploaded + concurrently. + + Note that chunks are stored in memory and there may be up to + "--transfers" * "--b2-upload-concurrency" chunks stored at once + in memory. + --disable-checksum Disable checksums for large (> upload cutoff) files. @@ -100,24 +110,51 @@ DESCRIPTION: (No trailing "/", "file" or "bucket") --download-auth-duration - Time before the authorization token will expire in s or suffix ms|s|m|h|d. + Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. + + This is used in combination with "rclone link" for making files + accessible to the public and sets the duration before the download + authorization token will expire. - The duration before the download authorization token will expire. The minimum value is 1 second. The maximum value is one week. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) + + --lifecycle + Set the number of days deleted files should be kept when creating a bucket. + + On bucket creation, this parameter is used to create a lifecycle rule + for the entire bucket. + + If lifecycle is 0 (the default) it does not create a lifecycle rule so + the default B2 behaviour applies. This is to create versions of files + on delete and overwrite and to keep them indefinitely. + + If lifecycle is >0 then it creates a single rule setting the number of + days before a file that is deleted or overwritten is deleted + permanently. This is known as daysFromHidingToDeleting in the b2 docs. + + The minimum value for this parameter is 1 day. + + You can also enable hard_delete in the config also which will mean + deletions won't cause versions but overwrites will still cause + versions to be made. + + See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation. + --encoding The encoding for the backend. 
See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --account value Account ID or Application Key ID. [$ACCOUNT] @@ -129,14 +166,17 @@ OPTIONS: --chunk-size value Upload chunk size. (default: "96Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4Gi") [$COPY_CUTOFF] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Disable checksums for large (> upload cutoff) files. (default: false) [$DISABLE_CHECKSUM] - --download-auth-duration value Time before the authorization token will expire in s or suffix ms|s|m|h|d. (default: "1w") [$DOWNLOAD_AUTH_DURATION] + --download-auth-duration value Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. (default: "1w") [$DOWNLOAD_AUTH_DURATION] --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --endpoint value Endpoint for the service. [$ENDPOINT] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] + --lifecycle value Set the number of days deleted files should be kept when creating a bucket. (default: 0) [$LIFECYCLE] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] --test-mode value A flag string for X-Bz-Test-Mode header for debugging. [$TEST_MODE] + --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --versions Include old versions in directory listings. (default: false) [$VERSIONS] @@ -154,7 +194,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/box.md b/docs/en/cli-reference/storage/create/box.md index 41123524..e0bdd12a 100644 --- a/docs/en/cli-reference/storage/create/box.md +++ b/docs/en/cli-reference/storage/create/box.md @@ -66,11 +66,28 @@ DESCRIPTION: --owned-by Only show items owned by the login (email address) passed in. + --impersonate + Impersonate this user ID when using a service account. + + Setting this flag allows rclone, when using a JWT service account, to + act on behalf of another user by setting the as-user header. + + The user ID is the Box identifier for a user. User IDs can found for + any user via the GET /users endpoint, which is only available to + admins, or by calling the GET /users/me endpoint with an authenticated + user session. 
+ + See: https://developer.box.com/guides/authentication/jwt/as-user/ + + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --access-token value Box App Primary Access Token [$ACCESS_TOKEN] @@ -84,7 +101,9 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --commit-retries value Max number of times to try committing a multipart file. (default: 100) [$COMMIT_RETRIES] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot") [$ENCODING] + --impersonate value Impersonate this user ID when using a service account. [$IMPERSONATE] --list-chunk value Size of listing chunk 1-1000. (default: 1000) [$LIST_CHUNK] --owned-by value Only show items owned by the login (email address) passed in. [$OWNED_BY] --root-folder-id value Fill in for rclone to use a non root folder as its starting point. (default: "0") [$ROOT_FOLDER_ID] @@ -105,7 +124,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/drive.md b/docs/en/cli-reference/storage/create/drive.md index 1dc92c31..820f5790 100644 --- a/docs/en/cli-reference/storage/create/drive.md +++ b/docs/en/cli-reference/storage/create/drive.md @@ -34,7 +34,7 @@ DESCRIPTION: Leave blank to use the provider defaults. --scope - Scope that rclone should use when requesting access from drive. + Comma separated list of scopes that rclone should use when requesting access from drive. Examples: | drive | Full access all files, excluding Application Data Folder. @@ -95,14 +95,31 @@ DESCRIPTION: If given, gdocs practically become invisible to rclone. + --show-all-gdocs + Show all Google Docs including non-exportable ones in listings. + + If you try a server side copy on a Google Form without this flag, you + will get this error: + + No export formats found for "application/vnd.google-apps.form" + + However adding this flag will allow the form to be server side copied. + + Note that rclone doesn't add extensions to the Google Docs file names + in this mode. + + Do **not** use this flag when trying to download Google Docs - rclone + will fail to download them. + + --skip-checksum-gphotos - Skip MD5 checksum on Google photos and videos only. + Skip checksums on Google photos and videos only. Use this if you get checksum errors when transferring Google photos or videos. Setting this flag will cause Google photos and videos to return a - blank MD5 checksum. + blank checksums. Google photos are identified by being in the "photos" space. @@ -233,6 +250,8 @@ DESCRIPTION: Number of API calls to allow without sleeping. --server-side-across-configs + Deprecated: use --server-side-across-configs instead. + Allow server-side operations (e.g. copy) to work across different drive configs. 
This can be useful if you wish to do a server-side copy between two @@ -311,21 +330,115 @@ DESCRIPTION: Note also that opening the folder once in the web interface (with the user you've authenticated rclone with) seems to be enough so that the - resource key is no needed. + resource key is not needed. + + + --fast-list-bug-fix + Work around a bug in Google Drive listing. + + Normally rclone will work around a bug in Google Drive when using + --fast-list (ListR) where the search "(A in parents) or (B in + parents)" returns nothing sometimes. See #3114, #4289 and + https://issuetracker.google.com/issues/149522397 + + Rclone detects this by finding no items in more than one directory + when listing and retries them as lists of individual directories. + + This means that if you have a lot of empty directories rclone will end + up listing them all individually and this can take many more API + calls. + + This flag allows the work-around to be disabled. This is **not** + recommended in normal use - only if you have a particular case you are + having trouble with like many empty directories. + + + --metadata-owner + Control whether owner should be read or written in metadata. + + Owner is a standard part of the file metadata so is easy to read. But it + isn't always desirable to set the owner from the metadata. + + Note that you can't set the owner on Shared Drives, and that setting + ownership will generate an email to the new owner (this can't be + disabled), and you can't transfer ownership to someone outside your + organization. + + + Examples: + | off | Do not read or write the value + | read | Read the value only + | write | Write the value only + | failok | If writing fails log errors only, don't fail the transfer + | read,write | Read and Write the value. + + --metadata-permissions + Control whether permissions should be read or written in metadata. + + Reading permissions metadata from files can be done quickly, but it + isn't always desirable to set the permissions from the metadata. + + Note that rclone drops any inherited permissions on Shared Drives and + any owner permission on My Drives as these are duplicated in the owner + metadata. + Examples: + | off | Do not read or write the value + | read | Read the value only + | write | Write the value only + | failok | If writing fails log errors only, don't fail the transfer + | read,write | Read and Write the value. + + --metadata-labels + Control whether labels should be read or written in metadata. + + Reading labels metadata from files takes an extra API transaction and + will slow down listings. It isn't always desirable to set the labels + from the metadata. + + The format of labels is documented in the drive API documentation at + https://developers.google.com/drive/api/reference/rest/v3/Label - + rclone just provides a JSON dump of this format. + + When setting labels, the label and fields must already exist - rclone + will not create them. This means that if you are transferring labels + from two different accounts you will have to create the labels in + advance and use the metadata mapper to translate the IDs between the + two accounts. + + + Examples: + | off | Do not read or write the value + | read | Read the value only + | write | Write the value only + | failok | If writing fails log errors only, don't fail the transfer + | read,write | Read and Write the value. + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. 
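A minimal illustrative sketch only, assuming placeholder credential paths and values, and assuming any storage name argument follows the command's USAGE line: the documented --scope and --service-account-file options and the new --metadata-* controls (each accepting off, read, write, failok or read,write as listed in their Examples) could be combined roughly as follows.

    # all values below are hypothetical placeholders; the flags themselves are
    # taken from the OPTIONS list of this drive reference page
    singularity storage create drive \
      --service-account-file /path/to/service-account.json \
      --scope drive \
      --metadata-owner read \
      --metadata-permissions off \
      --metadata-labels off
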
+ --env-auth + Get IAM credentials from runtime (environment variables or instance meta data if no env vars). + + Only applies if service_account_file and service_account_credentials is blank. + + Examples: + | false | Enter credentials in the next step. + | true | Get GCP IAM credentials from the environment (env vars or IAM). + + --description + Description of the remote. + OPTIONS: --alternate-export Deprecated: No longer needed. (default: false) [$ALTERNATE_EXPORT] --client-id value Google Application Client Id [$CLIENT_ID] --client-secret value OAuth Client Secret. [$CLIENT_SECRET] --help, -h show help - --scope value Scope that rclone should use when requesting access from drive. [$SCOPE] + --scope value Comma separated list of scopes that rclone should use when requesting access from drive. [$SCOPE] --service-account-file value Service Account Credentials JSON file path. [$SERVICE_ACCOUNT_FILE] Advanced @@ -336,23 +449,30 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Upload chunk size. (default: "8Mi") [$CHUNK_SIZE] --copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut. (default: false) [$COPY_SHORTCUT_CONTENT] + --description value Description of the remote. [$DESCRIPTION] --disable-http2 Disable drive using http2. (default: true) [$DISABLE_HTTP2] --encoding value The encoding for the backend. (default: "InvalidUtf8") [$ENCODING] + --env-auth Get IAM credentials from runtime (environment variables or instance meta data if no env vars). (default: false) [$ENV_AUTH] --export-formats value Comma separated list of preferred formats for downloading Google docs. (default: "docx,xlsx,pptx,svg") [$EXPORT_FORMATS] + --fast-list-bug-fix Work around a bug in Google Drive listing. (default: true) [$FAST_LIST_BUG_FIX] --formats value Deprecated: See export_formats. [$FORMATS] --impersonate value Impersonate this user when using a service account. [$IMPERSONATE] --import-formats value Comma separated list of preferred formats for uploading Google docs. [$IMPORT_FORMATS] --keep-revision-forever Keep new head revision of each file forever. (default: false) [$KEEP_REVISION_FOREVER] --list-chunk value Size of listing chunk 100-1000, 0 to disable. (default: 1000) [$LIST_CHUNK] + --metadata-labels value Control whether labels should be read or written in metadata. (default: "off") [$METADATA_LABELS] + --metadata-owner value Control whether owner should be read or written in metadata. (default: "read") [$METADATA_OWNER] + --metadata-permissions value Control whether permissions should be read or written in metadata. (default: "off") [$METADATA_PERMISSIONS] --pacer-burst value Number of API calls to allow without sleeping. (default: 100) [$PACER_BURST] --pacer-min-sleep value Minimum time to sleep between API calls. (default: "100ms") [$PACER_MIN_SLEEP] --resource-key value Resource key for accessing a link-shared file. [$RESOURCE_KEY] --root-folder-id value ID of the root folder. [$ROOT_FOLDER_ID] - --server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs. (default: false) [$SERVER_SIDE_ACROSS_CONFIGS] + --server-side-across-configs Deprecated: use --server-side-across-configs instead. (default: false) [$SERVER_SIDE_ACROSS_CONFIGS] --service-account-credentials value Service Account Credentials JSON blob. [$SERVICE_ACCOUNT_CREDENTIALS] --shared-with-me Only show files that are shared with me. 
(default: false) [$SHARED_WITH_ME] + --show-all-gdocs Show all Google Docs including non-exportable ones in listings. (default: false) [$SHOW_ALL_GDOCS] --size-as-quota Show sizes as storage quota usage, not actual size. (default: false) [$SIZE_AS_QUOTA] - --skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only. (default: false) [$SKIP_CHECKSUM_GPHOTOS] + --skip-checksum-gphotos Skip checksums on Google photos and videos only. (default: false) [$SKIP_CHECKSUM_GPHOTOS] --skip-dangling-shortcuts If set skip dangling shortcut files. (default: false) [$SKIP_DANGLING_SHORTCUTS] --skip-gdocs Skip google documents in all listings. (default: false) [$SKIP_GDOCS] --skip-shortcuts If set skip shortcut files. (default: false) [$SKIP_SHORTCUTS] @@ -382,7 +502,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/dropbox.md b/docs/en/cli-reference/storage/create/dropbox.md index 9d4cf26a..c5f903d3 100644 --- a/docs/en/cli-reference/storage/create/dropbox.md +++ b/docs/en/cli-reference/storage/create/dropbox.md @@ -80,6 +80,20 @@ DESCRIPTION: Note that we don't unmount the shared folder afterwards so the --dropbox-shared-folders can be omitted after the first use of a particular shared folder. + + See also --dropbox-root-namespace for an alternative way to work with shared + folders. + + --pacer-min-sleep + Minimum time to sleep between API calls. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --root-namespace + Specify a different Dropbox namespace ID to use as the root for all paths. --batch-mode Upload file batching sync|async|off. @@ -103,7 +117,7 @@ DESCRIPTION: This sets the batch size of files to upload. It has to be less than 1000. - By default this is 0 which means rclone which calculate the batch size + By default this is 0 which means rclone will calculate the batch size depending on the setting of batch_mode. - batch_mode: async - default batch_size is 100 @@ -127,18 +141,16 @@ DESCRIPTION: The default for this is 0 which means rclone will choose a sensible default based on the batch_mode in use. - - batch_mode: async - default batch_timeout is 500ms - - batch_mode: sync - default batch_timeout is 10s + - batch_mode: async - default batch_timeout is 10s + - batch_mode: sync - default batch_timeout is 500ms - batch_mode: off - not in use --batch-commit-timeout Max time to wait for a batch to finish committing - --encoding - The encoding for the backend. - - See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. OPTIONS: @@ -154,8 +166,11 @@ OPTIONS: --batch-size value Max number of files in upload batch. (default: 0) [$BATCH_SIZE] --batch-timeout value Max time to allow an idle upload batch before uploading. (default: "0s") [$BATCH_TIMEOUT] --chunk-size value Upload chunk size (< 150Mi). (default: "48Mi") [$CHUNK_SIZE] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. 
(default: "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot") [$ENCODING] --impersonate value Impersonate this user when using a business account. [$IMPERSONATE] + --pacer-min-sleep value Minimum time to sleep between API calls. (default: "10ms") [$PACER_MIN_SLEEP] + --root-namespace value Specify a different Dropbox namespace ID to use as the root for all paths. [$ROOT_NAMESPACE] --shared-files Instructs rclone to work on individual shared files. (default: false) [$SHARED_FILES] --shared-folders Instructs rclone to work on shared folders. (default: false) [$SHARED_FOLDERS] --token value OAuth Access Token as a JSON blob. [$TOKEN] @@ -174,7 +189,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/fichier.md b/docs/en/cli-reference/storage/create/fichier.md index bc163e18..c0c816ae 100644 --- a/docs/en/cli-reference/storage/create/fichier.md +++ b/docs/en/cli-reference/storage/create/fichier.md @@ -21,11 +21,17 @@ DESCRIPTION: --folder-password If you want to list the files in a shared folder that is password protected, add this parameter. + --cdn + Set if you wish to use CDN download links. + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --api-key value Your API Key, get it from https://1fichier.com/console/params.pl. [$API_KEY] @@ -33,6 +39,8 @@ OPTIONS: Advanced + --cdn Set if you wish to use CDN download links. (default: false) [$CDN] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot") [$ENCODING] --file-password value If you want to download a shared file that is password protected, add this parameter. [$FILE_PASSWORD] --folder-password value If you want to list the files in a shared folder that is password protected, add this parameter. [$FOLDER_PASSWORD] @@ -51,7 +59,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/filefabric.md b/docs/en/cli-reference/storage/create/filefabric.md index b4d26bcc..3da3cda6 100644 --- a/docs/en/cli-reference/storage/create/filefabric.md +++ b/docs/en/cli-reference/storage/create/filefabric.md @@ -64,6 +64,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help @@ -73,6 +76,7 @@ OPTIONS: Advanced + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. 
(default: "Slash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --token value Session Token. [$TOKEN] --token-expiry value Token expiry time. [$TOKEN_EXPIRY] @@ -91,7 +95,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/ftp.md b/docs/en/cli-reference/storage/create/ftp.md index 62835101..f3abb06d 100644 --- a/docs/en/cli-reference/storage/create/ftp.md +++ b/docs/en/cli-reference/storage/create/ftp.md @@ -104,6 +104,16 @@ DESCRIPTION: If this is set and no password is supplied then rclone will ask for a password + --socks-proxy + Socks 5 proxy host. + + Supports the format user:pass@host:port, user@host:port, host:port. + + Example: + + myUser:myPass@localhost:9005 + + --encoding The encoding for the backend. @@ -114,6 +124,9 @@ DESCRIPTION: | BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket | PureFTPd can't handle '[]' or '*' in file names | Ctl,LeftPeriod,Slash | VsFTPd can't handle file names starting with dot + --description + Description of the remote. + OPTIONS: --explicit-tls Use Explicit FTPS (FTP over TLS). (default: false) [$EXPLICIT_TLS] @@ -129,6 +142,7 @@ OPTIONS: --ask-password Allow asking for FTP password when needed. (default: false) [$ASK_PASSWORD] --close-timeout value Maximum time to wait for a response to close. (default: "1m0s") [$CLOSE_TIMEOUT] --concurrency value Maximum number of FTP simultaneous connections, 0 for unlimited. (default: 0) [$CONCURRENCY] + --description value Description of the remote. [$DESCRIPTION] --disable-epsv Disable using EPSV even if server advertises support. (default: false) [$DISABLE_EPSV] --disable-mlsd Disable using MLSD even if server advertises support. (default: false) [$DISABLE_MLSD] --disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) (default: false) [$DISABLE_TLS13] @@ -138,6 +152,7 @@ OPTIONS: --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] --no-check-certificate Do not verify the TLS certificate of the server. (default: false) [$NO_CHECK_CERTIFICATE] --shut-timeout value Maximum time to wait for data connection closing status. (default: "1m0s") [$SHUT_TIMEOUT] + --socks-proxy value Socks 5 proxy host. [$SOCKS_PROXY] --tls-cache-size value Size of TLS session cache for all control and data connections. 
(default: 32) [$TLS_CACHE_SIZE] --writing-mdtm Use MDTM to set modification time (VsFtpd quirk) (default: false) [$WRITING_MDTM] @@ -154,7 +169,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/gcs.md b/docs/en/cli-reference/storage/create/gcs.md index 61cd0250..d2980af3 100644 --- a/docs/en/cli-reference/storage/create/gcs.md +++ b/docs/en/cli-reference/storage/create/gcs.md @@ -37,6 +37,11 @@ DESCRIPTION: Optional - needed only for list/create/delete buckets - see your developer console. + --user-project + User project. + + Optional - needed only for requester pays. + --service-account-file Service Account Credentials JSON file path. @@ -155,6 +160,13 @@ DESCRIPTION: | ARCHIVE | Archive storage class | DURABLE_REDUCED_AVAILABILITY | Durable reduced availability storage class + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. @@ -192,6 +204,9 @@ DESCRIPTION: | false | Enter credentials in the next step. | true | Get GCP IAM credentials from the environment (env vars or IAM). + --description + Description of the remote. + OPTIONS: --anonymous Access public buckets and objects without credentials. (default: false) [$ANONYMOUS] @@ -207,16 +222,19 @@ OPTIONS: --service-account-credentials value Service Account Credentials JSON blob. [$SERVICE_ACCOUNT_CREDENTIALS] --service-account-file value Service Account Credentials JSON file path. [$SERVICE_ACCOUNT_FILE] --storage-class value The storage class to use when storing objects in Google Cloud Storage. [$STORAGE_CLASS] + --user-project value User project. [$USER_PROJECT] Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] - --endpoint value Endpoint for the service. [$ENDPOINT] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] + --endpoint value Endpoint for the service. [$ENDPOINT] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. 
[$TOKEN_URL] Client Config @@ -231,7 +249,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/gphotos.md b/docs/en/cli-reference/storage/create/gphotos.md index f4a19b41..90be0f18 100644 --- a/docs/en/cli-reference/storage/create/gphotos.md +++ b/docs/en/cli-reference/storage/create/gphotos.md @@ -70,6 +70,61 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --batch-mode + Upload file batching sync|async|off. + + This sets the batch mode used by rclone. + + This has 3 possible values + + - off - no batching + - sync - batch uploads and check completion (default) + - async - batch upload and don't check completion + + Rclone will close any outstanding batches when it exits which may make + a delay on quit. + + + --batch-size + Max number of files in upload batch. + + This sets the batch size of files to upload. It has to be less than 50. + + By default this is 0 which means rclone will calculate the batch size + depending on the setting of batch_mode. + + - batch_mode: async - default batch_size is 50 + - batch_mode: sync - default batch_size is the same as --transfers + - batch_mode: off - not in use + + Rclone will close any outstanding batches when it exits which may make + a delay on quit. + + Setting this is a great idea if you are uploading lots of small files + as it will make them a lot quicker. You can use --transfers 32 to + maximise throughput. + + + --batch-timeout + Max time to allow an idle upload batch before uploading. + + If an upload batch is idle for more than this long then it will be + uploaded. + + The default for this is 0 which means rclone will choose a sensible + default based on the batch_mode in use. + + - batch_mode: async - default batch_timeout is 10s + - batch_mode: sync - default batch_timeout is 1s + - batch_mode: off - not in use + + + --batch-commit-timeout + Max time to wait for a batch to finish committing + + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -79,13 +134,18 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] - --include-archived Also view and download archived media. (default: false) [$INCLUDE_ARCHIVED] - --read-size Set to read the size of media items. (default: false) [$READ_SIZE] - --start-year value Year limits the photos to be downloaded to those which are uploaded after the given year. (default: 2000) [$START_YEAR] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --batch-commit-timeout value Max time to wait for a batch to finish committing (default: "10m0s") [$BATCH_COMMIT_TIMEOUT] + --batch-mode value Upload file batching sync|async|off. (default: "sync") [$BATCH_MODE] + --batch-size value Max number of files in upload batch. (default: 0) [$BATCH_SIZE] + --batch-timeout value Max time to allow an idle upload batch before uploading. 
(default: "0s") [$BATCH_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] + --include-archived Also view and download archived media. (default: false) [$INCLUDE_ARCHIVED] + --read-size Set to read the size of media items. (default: false) [$READ_SIZE] + --start-year value Year limits the photos to be downloaded to those which are uploaded after the given year. (default: 2000) [$START_YEAR] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config @@ -100,7 +160,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/hdfs.md b/docs/en/cli-reference/storage/create/hdfs.md index b382f424..69dc774d 100644 --- a/docs/en/cli-reference/storage/create/hdfs.md +++ b/docs/en/cli-reference/storage/create/hdfs.md @@ -10,9 +10,9 @@ USAGE: DESCRIPTION: --namenode - Hadoop name node and port. + Hadoop name nodes and ports. - E.g. "namenode:8020" to connect to host namenode at port 8020. + E.g. "namenode-1:8020,namenode-2:8020,..." to connect to host namenodes at port 8020. --username Hadoop user name. @@ -31,9 +31,9 @@ DESCRIPTION: Kerberos data transfer protection: authentication|integrity|privacy. Specifies whether or not authentication, data signature integrity - checks, and wire encryption is required when communicating the the - datanodes. Possible values are 'authentication', 'integrity' and - 'privacy'. Used only with KERBEROS enabled. + checks, and wire encryption are required when communicating with + the datanodes. Possible values are 'authentication', 'integrity' + and 'privacy'. Used only with KERBEROS enabled. Examples: | privacy | Ensure authentication, integrity and encryption enabled. @@ -43,15 +43,19 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help - --namenode value Hadoop name node and port. [$NAMENODE] + --namenode value Hadoop name nodes and ports. [$NAMENODE] --username value Hadoop user name. [$USERNAME] Advanced --data-transfer-protection value Kerberos data transfer protection: authentication|integrity|privacy. [$DATA_TRANSFER_PROTECTION] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Colon,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --service-principal-name value Kerberos service principal name for the namenode. 
[$SERVICE_PRINCIPAL_NAME] @@ -68,7 +72,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/hidrive.md b/docs/en/cli-reference/storage/create/hidrive.md index 5942cf09..1068f63f 100644 --- a/docs/en/cli-reference/storage/create/hidrive.md +++ b/docs/en/cli-reference/storage/create/hidrive.md @@ -107,6 +107,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -118,6 +121,7 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Chunksize for chunked uploads. (default: "48Mi") [$CHUNK_SIZE] + --description value Description of the remote. [$DESCRIPTION] --disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary. (default: false) [$DISABLE_FETCHING_MEMBER_COUNT] --encoding value The encoding for the backend. (default: "Slash,Dot") [$ENCODING] --endpoint value Endpoint for the service. (default: "https://api.hidrive.strato.com/2.1") [$ENDPOINT] @@ -141,7 +145,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/http.md b/docs/en/cli-reference/storage/create/http.md index d67b31be..5d34cc3e 100644 --- a/docs/en/cli-reference/storage/create/http.md +++ b/docs/en/cli-reference/storage/create/http.md @@ -56,16 +56,24 @@ DESCRIPTION: that directory listings are much quicker, but rclone won't have the times or sizes of any files, and some files that don't exist may be in the listing. + --no-escape + Do not escape URL metacharacters in path names. + + --description + Description of the remote. + OPTIONS: --help, -h show help + --no-escape Do not escape URL metacharacters in path names. (default: false) [$NO_ESCAPE] --url value URL of HTTP host to connect to. [$URL] Advanced - --headers value Set HTTP headers for all transactions. [$HEADERS] - --no-head Don't use HEAD requests. (default: false) [$NO_HEAD] - --no-slash Set this if the site doesn't end directories with /. (default: false) [$NO_SLASH] + --description value Description of the remote. [$DESCRIPTION] + --headers value Set HTTP headers for all transactions. [$HEADERS] + --no-head Don't use HEAD requests. (default: false) [$NO_HEAD] + --no-slash Set this if the site doesn't end directories with /. 
(default: false) [$NO_SLASH] Client Config @@ -80,7 +88,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/internetarchive.md b/docs/en/cli-reference/storage/create/internetarchive.md index de1a40a7..f226f4f7 100644 --- a/docs/en/cli-reference/storage/create/internetarchive.md +++ b/docs/en/cli-reference/storage/create/internetarchive.md @@ -47,6 +47,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --access-key-id value IAS3 Access Key. [$ACCESS_KEY_ID] @@ -55,6 +58,7 @@ OPTIONS: Advanced + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone. (default: true) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --endpoint value IAS3 Endpoint. (default: "https://s3.us.archive.org") [$ENDPOINT] @@ -74,7 +78,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/jottacloud.md b/docs/en/cli-reference/storage/create/jottacloud.md index cb777e99..fedd1836 100644 --- a/docs/en/cli-reference/storage/create/jottacloud.md +++ b/docs/en/cli-reference/storage/create/jottacloud.md @@ -9,6 +9,29 @@ USAGE: singularity storage create jottacloud [command options] DESCRIPTION: + --client-id + OAuth Client Id. + + Leave blank normally. + + --client-secret + OAuth Client Secret. + + Leave blank normally. + + --token + OAuth Access Token as a JSON blob. + + --auth-url + Auth server URL. + + Leave blank to use the provider defaults. + + --token-url + Token server url. + + Leave blank to use the provider defaults. + --md5-memory-limit Files bigger than this will be cached on disk to calculate the MD5 if required. @@ -31,16 +54,25 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: - --help, -h show help + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] + --help, -h show help Advanced + --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] --md5-memory-limit value Files bigger than this will be cached on disk to calculate the MD5 if required. 
(default: "10Mi") [$MD5_MEMORY_LIMIT] --no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them. (default: false) [$NO_VERSIONS] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] --trashed-only Only show files that are in the trash. (default: false) [$TRASHED_ONLY] --upload-resume-limit value Files bigger than this can be resumed if the upload fail's. (default: "10Mi") [$UPLOAD_RESUME_LIMIT] @@ -57,7 +89,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/koofr/digistorage.md b/docs/en/cli-reference/storage/create/koofr/digistorage.md index 92a24cc6..346715a7 100644 --- a/docs/en/cli-reference/storage/create/koofr/digistorage.md +++ b/docs/en/cli-reference/storage/create/koofr/digistorage.md @@ -23,24 +23,28 @@ DESCRIPTION: Your user name. --password - Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password). + Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password. --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help - --password value Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password). [$PASSWORD] + --password value Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password. [$PASSWORD] --user value Your user name. [$USER] Advanced - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --mountid value Mount ID of the mount to use. [$MOUNTID] - --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --mountid value Mount ID of the mount to use. [$MOUNTID] + --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] Client Config @@ -55,7 +59,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/koofr/koofr.md b/docs/en/cli-reference/storage/create/koofr/koofr.md index acfdae98..76e6ba8a 100644 --- a/docs/en/cli-reference/storage/create/koofr/koofr.md +++ b/docs/en/cli-reference/storage/create/koofr/koofr.md @@ -23,24 +23,28 @@ DESCRIPTION: Your user name. 
--password - Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password). + Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password. --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help - --password value Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password). [$PASSWORD] + --password value Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password. [$PASSWORD] --user value Your user name. [$USER] Advanced - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --mountid value Mount ID of the mount to use. [$MOUNTID] - --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --mountid value Mount ID of the mount to use. [$MOUNTID] + --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] Client Config @@ -55,7 +59,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/koofr/other.md b/docs/en/cli-reference/storage/create/koofr/other.md index cb2fecec..4ca6a92f 100644 --- a/docs/en/cli-reference/storage/create/koofr/other.md +++ b/docs/en/cli-reference/storage/create/koofr/other.md @@ -33,6 +33,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --endpoint value The Koofr API endpoint to use. [$ENDPOINT] @@ -42,9 +45,10 @@ OPTIONS: Advanced - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --mountid value Mount ID of the mount to use. [$MOUNTID] - --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --mountid value Mount ID of the mount to use. [$MOUNTID] + --setmtime Does the backend support setting modification time. 
(default: true) [$SETMTIME] Client Config @@ -59,7 +63,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/local.md b/docs/en/cli-reference/storage/create/local.md index 6b626af1..9b4decdf 100644 --- a/docs/en/cli-reference/storage/create/local.md +++ b/docs/en/cli-reference/storage/create/local.md @@ -81,6 +81,11 @@ DESCRIPTION: - Only checksum the size that stat gave - Don't update the stat info for the file + **NB** do not use this flag on a Windows Volume Shadow (VSS). For some + unknown reason, files in a VSS sometimes show different sizes from the + directory listing (where the initial stat value comes from on Windows) + and when stat is called on them directly. Other copy tools always use + the direct stat value and setting this flag will disable that. --one-file-system @@ -100,6 +105,24 @@ DESCRIPTION: Windows/macOS and case sensitive for everything else. Use this flag to override the default choice. + --no-clone + Disable reflink cloning for server-side copies. + + Normally, for local-to-local transfers, rclone will "clone" the file when + possible, and fall back to "copying" only when cloning is not supported. + + Cloning creates a shallow copy (or "reflink") which initially shares blocks with + the original file. Unlike a "hardlink", the two files are independent and + neither will affect the other if subsequently modified. + + Cloning is usually preferable to copying, as it is much faster and is + deduplicated by default (i.e. having two identical files does not consume more + storage than having just one.) However, for use cases where data redundancy is + preferable, --local-no-clone can be used to disable cloning and force "deep" copies. + + Currently, cloning is only supported when using APFS on macOS (support for other + platforms may be added in the future.) + --no-preallocate Disable preallocation of disk space for transferred files. @@ -126,11 +149,41 @@ DESCRIPTION: when copying to a CIFS mount owned by another user. If this option is enabled, rclone will no longer update the modtime after copying a file. + --time-type + Set what kind of time is returned. + + Normally rclone does all operations on the mtime or Modification time. + + If you set this flag then rclone will return the Modified time as whatever + you set here. So if you use "rclone lsl --local-time-type ctime" then + you will see ctimes in the listing. + + If the OS doesn't support returning the time_type specified then rclone + will silently replace it with the modification time which all OSes support. + + - mtime is supported by all OSes + - atime is supported on all OSes except: plan9, js + - btime is only supported on: Windows, macOS, freebsd, netbsd + - ctime is supported on all Oses except: Windows, plan9, js + + Note that setting the time will still set the modified time so this is + only useful for reading. + + + Examples: + | mtime | The last modification time. + | atime | The last access time. + | btime | The creation time. + | ctime | The last status change time. + --encoding The encoding for the backend. 
See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help @@ -140,15 +193,18 @@ OPTIONS: --case-insensitive Force the filesystem to report itself as case insensitive. (default: false) [$CASE_INSENSITIVE] --case-sensitive Force the filesystem to report itself as case sensitive. (default: false) [$CASE_SENSITIVE] --copy-links, -L Follow symlinks and copy the pointed to item. (default: false) [$COPY_LINKS] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Dot") [$ENCODING] --links, -l Translate symlinks to/from regular files with a '.rclonelink' extension. (default: false) [$LINKS] --no-check-updated Don't check to see if the files change during upload. (default: false) [$NO_CHECK_UPDATED] + --no-clone Disable reflink cloning for server-side copies. (default: false) [$NO_CLONE] --no-preallocate Disable preallocation of disk space for transferred files. (default: false) [$NO_PREALLOCATE] --no-set-modtime Disable setting modtime. (default: false) [$NO_SET_MODTIME] --no-sparse Disable sparse files for multi-thread downloads. (default: false) [$NO_SPARSE] --nounc Disable UNC (long path names) conversion on Windows. (default: false) [$NOUNC] --one-file-system, -x Don't cross filesystem boundaries (unix/macOS only). (default: false) [$ONE_FILE_SYSTEM] --skip-links Don't warn about skipped symlinks. (default: false) [$SKIP_LINKS] + --time-type value Set what kind of time is returned. (default: "mtime") [$TIME_TYPE] --unicode-normalization Apply unicode NFC normalization to paths and filenames. (default: false) [$UNICODE_NORMALIZATION] --zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated). (default: false) [$ZERO_SIZE_LINKS] diff --git a/docs/en/cli-reference/storage/create/mailru.md b/docs/en/cli-reference/storage/create/mailru.md index c30a24ee..21dc520b 100644 --- a/docs/en/cli-reference/storage/create/mailru.md +++ b/docs/en/cli-reference/storage/create/mailru.md @@ -9,6 +9,29 @@ USAGE: singularity storage create mailru [command options] DESCRIPTION: + --client-id + OAuth Client Id. + + Leave blank normally. + + --client-secret + OAuth Client Secret. + + Leave blank normally. + + --token + OAuth Access Token as a JSON blob. + + --auth-url + Auth server URL. + + Leave blank to use the provider defaults. + + --token-url + Token server url. + + Leave blank to use the provider defaults. + --user User name (usually email). @@ -91,21 +114,30 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: - --help, -h show help - --pass value Password. [$PASS] - --speedup-enable Skip full upload if there is another file with same data hash. (default: true) [$SPEEDUP_ENABLE] - --user value User name (usually email). [$USER] + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] + --help, -h show help + --pass value Password. [$PASS] + --speedup-enable Skip full upload if there is another file with same data hash. (default: true) [$SPEEDUP_ENABLE] + --user value User name (usually email). [$USER] Advanced + --auth-url value Auth server URL. [$AUTH_URL] --check-hash What should copy do if file checksum is mismatched or invalid. (default: true) [$CHECK_HASH] + --description value Description of the remote. 
[$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --quirks value Comma separated list of internal maintenance flags. [$QUIRKS] --speedup-file-patterns value Comma separated list of file name patterns eligible for speedup (put by hash). (default: "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf") [$SPEEDUP_FILE_PATTERNS] --speedup-max-disk value This option allows you to disable speedup (put by hash) for large files. (default: "3Gi") [$SPEEDUP_MAX_DISK] --speedup-max-memory value Files larger than the size given below will always be hashed on disk. (default: "32Mi") [$SPEEDUP_MAX_MEMORY] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] --user-agent value HTTP user agent used internally by client. [$USER_AGENT] Client Config @@ -121,7 +153,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/mega.md b/docs/en/cli-reference/storage/create/mega.md index 4cccd67c..1e6c8778 100644 --- a/docs/en/cli-reference/storage/create/mega.md +++ b/docs/en/cli-reference/storage/create/mega.md @@ -34,7 +34,7 @@ DESCRIPTION: MEGA uses plain text HTTP connections by default. Some ISPs throttle HTTP connections, this causes transfers to become very slow. Enabling this will force MEGA to use HTTPS for all transfers. - HTTPS is normally not necesary since all data is already encrypted anyway. + HTTPS is normally not necessary since all data is already encrypted anyway. Enabling it will increase CPU usage and add network overhead. --encoding @@ -42,6 +42,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help @@ -50,10 +53,11 @@ OPTIONS: Advanced - --debug Output more debug from Mega. (default: false) [$DEBUG] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] - --use-https Use HTTPS for transfers. (default: false) [$USE_HTTPS] + --debug Output more debug from Mega. (default: false) [$DEBUG] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] + --use-https Use HTTPS for transfers. 
(default: false) [$USE_HTTPS] Client Config @@ -68,7 +72,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/netstorage.md b/docs/en/cli-reference/storage/create/netstorage.md index 180c2f54..88b5db32 100644 --- a/docs/en/cli-reference/storage/create/netstorage.md +++ b/docs/en/cli-reference/storage/create/netstorage.md @@ -32,6 +32,9 @@ DESCRIPTION: Please choose the 'y' option to set your own password then enter your secret. + --description + Description of the remote. + OPTIONS: --account value Set the NetStorage account name [$ACCOUNT] @@ -41,7 +44,8 @@ OPTIONS: Advanced - --protocol value Select between HTTP or HTTPS protocol. (default: "https") [$PROTOCOL] + --description value Description of the remote. [$DESCRIPTION] + --protocol value Select between HTTP or HTTPS protocol. (default: "https") [$PROTOCOL] Client Config @@ -56,7 +60,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/onedrive.md b/docs/en/cli-reference/storage/create/onedrive.md index 99879457..85645aee 100644 --- a/docs/en/cli-reference/storage/create/onedrive.md +++ b/docs/en/cli-reference/storage/create/onedrive.md @@ -93,11 +93,15 @@ DESCRIPTION: listing, set this option. --server-side-across-configs + Deprecated: use --server-side-across-configs instead. + Allow server-side operations (e.g. copy) to work across different onedrive configs. - This will only work if you are copying between two OneDrive *Personal* drives AND - the files to copy are already shared between them. In other cases, rclone will - fall back to normal copy (which will be slightly slower). + This will work if you are copying between two OneDrive *Personal* drives AND the files to + copy are already shared between them. Additionally, it should also function for a user who + has access permissions both between Onedrive for *business* and *SharePoint* under the *same + tenant*, and between *SharePoint* and another *SharePoint* under the *same tenant*. In other + cases, rclone will fall back to normal copy (which will be slightly slower). --list-chunk Size of listing chunk. @@ -117,6 +121,16 @@ DESCRIPTION: this flag there. + --hard-delete + Permanently delete files on removal. + + Normally files will get sent to the recycle bin on deletion. Setting + this flag causes them to be permanently deleted. Use with care. + + OneDrive personal accounts do not support the permanentDelete API, + it only applies to OneDrive for Business and SharePoint document libraries. + + --link-scope Set the scope of the links created by the link command. @@ -145,7 +159,7 @@ DESCRIPTION: Specify the hash in use for the backend. 
This specifies the hash type in use. If set to "auto" it will use the - default hash which is is QuickXorHash. + default hash which is QuickXorHash. Before rclone 1.62 an SHA1 hash was used by default for Onedrive Personal. For 1.62 and later the default is to use a QuickXorHash for @@ -153,7 +167,7 @@ DESCRIPTION: accordingly. From July 2023 QuickXorHash will be the only available hash for - both OneDrive for Business and OneDriver Personal. + both OneDrive for Business and OneDrive Personal. This can be set to "none" to not use any hashes. @@ -170,11 +184,73 @@ DESCRIPTION: | crc32 | CRC32 | none | None - don't use any hashes + --av-override + Allows download of files the server thinks has a virus. + + The onedrive/sharepoint server may check files uploaded with an Anti + Virus checker. If it detects any potential viruses or malware it will + block download of the file. + + In this case you will see a message like this + + server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden: + + If you are 100% sure you want to download this file anyway then use + the --onedrive-av-override flag, or av_override = true in the config + file. + + + --delta + If set rclone will use delta listing to implement recursive listings. + + If this flag is set the onedrive backend will advertise `ListR` + support for recursive listings. + + Setting this flag speeds up these things greatly: + + rclone lsf -R onedrive: + rclone size onedrive: + rclone rc vfs/refresh recursive=true + + **However** the delta listing API **only** works at the root of the + drive. If you use it not at the root then it recurses from the root + and discards all the data that is not under the directory you asked + for. So it will be correct but may not be very efficient. + + This is why this flag is not set as the default. + + As a rule of thumb if nearly all of your data is under rclone's root + directory (the `root/directory` in `onedrive:root/directory`) then + using this flag will be be a big performance win. If your data is + mostly not under the root then using this flag will be a big + performance loss. + + It is recommended if you are mounting your onedrive at the root + (or near the root when using crypt) and using rclone `rc vfs/refresh`. + + + --metadata-permissions + Control whether permissions should be read or written in metadata. + + Reading permissions metadata from files can be done quickly, but it + isn't always desirable to set the permissions from the metadata. + + + Examples: + | off | Do not read or write the value + | read | Read the value only + | write | Write the value only + | read,write | Read and Write the value. + | failok | If writing fails log errors only, don't fail the transfer + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -186,20 +262,25 @@ OPTIONS: --access-scopes value Set scopes to be requested by rclone. (default: "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access") [$ACCESS_SCOPES] --auth-url value Auth server URL. [$AUTH_URL] + --av-override Allows download of files the server thinks has a virus. (default: false) [$AV_OVERRIDE] --chunk-size value Chunk size to upload files with - must be multiple of 320k (327,680 bytes). 
(default: "10Mi") [$CHUNK_SIZE] + --delta If set rclone will use delta listing to implement recursive listings. (default: false) [$DELTA] + --description value Description of the remote. [$DESCRIPTION] --disable-site-permission Disable the request for Sites.Read.All permission. (default: false) [$DISABLE_SITE_PERMISSION] --drive-id value The ID of the drive to use. [$DRIVE_ID] --drive-type value The type of the drive (personal | business | documentLibrary). [$DRIVE_TYPE] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] --expose-onenote-files Set to make OneNote files show up in directory listings. (default: false) [$EXPOSE_ONENOTE_FILES] + --hard-delete Permanently delete files on removal. (default: false) [$HARD_DELETE] --hash-type value Specify the hash in use for the backend. (default: "auto") [$HASH_TYPE] --link-password value Set the password for links created by the link command. [$LINK_PASSWORD] --link-scope value Set the scope of the links created by the link command. (default: "anonymous") [$LINK_SCOPE] --link-type value Set the type of the links created by the link command. (default: "view") [$LINK_TYPE] --list-chunk value Size of listing chunk. (default: 1000) [$LIST_CHUNK] + --metadata-permissions value Control whether permissions should be read or written in metadata. (default: "off") [$METADATA_PERMISSIONS] --no-versions Remove all versions on modifying operations. (default: false) [$NO_VERSIONS] --root-folder-id value ID of the root folder. [$ROOT_FOLDER_ID] - --server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs. (default: false) [$SERVER_SIDE_ACROSS_CONFIGS] + --server-side-across-configs Deprecated: use --server-side-across-configs instead. (default: false) [$SERVER_SIDE_ACROSS_CONFIGS] --token value OAuth Access Token as a JSON blob. [$TOKEN] --token-url value Token server url. [$TOKEN_URL] @@ -216,7 +297,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/oos/README.md b/docs/en/cli-reference/storage/create/oos/README.md index 607631ce..a1fcc757 100644 --- a/docs/en/cli-reference/storage/create/oos/README.md +++ b/docs/en/cli-reference/storage/create/oos/README.md @@ -18,6 +18,8 @@ COMMANDS: user_principal_auth use an OCI user and an API key for authentication. you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key. https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm + workload_identity_auth use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM). 
+ https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm help, h Shows a list of commands or help for one command OPTIONS: diff --git a/docs/en/cli-reference/storage/create/oos/env_auth.md b/docs/en/cli-reference/storage/create/oos/env_auth.md index 204137dc..840e3bfd 100644 --- a/docs/en/cli-reference/storage/create/oos/env_auth.md +++ b/docs/en/cli-reference/storage/create/oos/env_auth.md @@ -41,9 +41,8 @@ DESCRIPTION: Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown - size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google - photos or google docs) they will be uploaded as multipart uploads - using this chunk size. + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. @@ -64,6 +63,18 @@ DESCRIPTION: statistics displayed with "-P" flag. + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + --upload-concurrency Concurrency for multipart uploads. @@ -102,7 +113,7 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --leave-parts-on-error - If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. It should be set to true for resuming uploads across different sessions. @@ -110,6 +121,16 @@ DESCRIPTION: additional costs if not cleaned up. + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. + This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. @@ -145,7 +166,7 @@ DESCRIPTION: | | None --sse-kms-key-id - if using using your own master key in vault, this header specifies the + if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. @@ -162,6 +183,9 @@ DESCRIPTION: | | None | AES256 | AES256 + --description + Description of the remote. + OPTIONS: --compartment value Object storage compartment OCID [$COMPARTMENT] @@ -172,18 +196,21 @@ OPTIONS: Advanced + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] --chunk-size value Chunk size to use for uploading. 
(default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] - --sse-kms-key-id value if using using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] --storage-tier value The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] @@ -201,7 +228,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/oos/instance_principal_auth.md b/docs/en/cli-reference/storage/create/oos/instance_principal_auth.md index ca3b51bb..88df806e 100644 --- a/docs/en/cli-reference/storage/create/oos/instance_principal_auth.md +++ b/docs/en/cli-reference/storage/create/oos/instance_principal_auth.md @@ -45,9 +45,8 @@ DESCRIPTION: Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown - size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google - photos or google docs) they will be uploaded as multipart uploads - using this chunk size. + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. 
Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. @@ -68,6 +67,18 @@ DESCRIPTION: statistics displayed with "-P" flag. + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + --upload-concurrency Concurrency for multipart uploads. @@ -106,7 +117,7 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --leave-parts-on-error - If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. It should be set to true for resuming uploads across different sessions. @@ -114,6 +125,16 @@ DESCRIPTION: additional costs if not cleaned up. + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. + This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. @@ -149,7 +170,7 @@ DESCRIPTION: | | None --sse-kms-key-id - if using using your own master key in vault, this header specifies the + if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. @@ -166,6 +187,9 @@ DESCRIPTION: | | None | AES256 | AES256 + --description + Description of the remote. + OPTIONS: --compartment value Object storage compartment OCID [$COMPARTMENT] @@ -176,18 +200,21 @@ OPTIONS: Advanced + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. 
(default: 10000) [$MAX_UPLOAD_PARTS] --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] - --sse-kms-key-id value if using using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] --storage-tier value The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] @@ -205,7 +232,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/oos/no_auth.md b/docs/en/cli-reference/storage/create/oos/no_auth.md index 3c34724e..4a509000 100644 --- a/docs/en/cli-reference/storage/create/oos/no_auth.md +++ b/docs/en/cli-reference/storage/create/oos/no_auth.md @@ -38,9 +38,8 @@ DESCRIPTION: Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown - size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google - photos or google docs) they will be uploaded as multipart uploads - using this chunk size. + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. @@ -61,6 +60,18 @@ DESCRIPTION: statistics displayed with "-P" flag. + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + --upload-concurrency Concurrency for multipart uploads. @@ -99,7 +110,7 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --leave-parts-on-error - If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. 
It should be set to true for resuming uploads across different sessions. @@ -107,6 +118,16 @@ DESCRIPTION: additional costs if not cleaned up. + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. + This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. @@ -142,7 +163,7 @@ DESCRIPTION: | | None --sse-kms-key-id - if using using your own master key in vault, this header specifies the + if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. @@ -159,6 +180,9 @@ DESCRIPTION: | | None | AES256 | AES256 + --description + Description of the remote. + OPTIONS: --endpoint value Endpoint for Object storage API. [$ENDPOINT] @@ -168,18 +192,21 @@ OPTIONS: Advanced + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. 
[$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] - --sse-kms-key-id value if using using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] --storage-tier value The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] @@ -197,7 +224,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/oos/resource_principal_auth.md b/docs/en/cli-reference/storage/create/oos/resource_principal_auth.md index 22a80824..f9c0d599 100644 --- a/docs/en/cli-reference/storage/create/oos/resource_principal_auth.md +++ b/docs/en/cli-reference/storage/create/oos/resource_principal_auth.md @@ -41,9 +41,8 @@ DESCRIPTION: Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown - size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google - photos or google docs) they will be uploaded as multipart uploads - using this chunk size. + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. @@ -64,6 +63,18 @@ DESCRIPTION: statistics displayed with "-P" flag. + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + --upload-concurrency Concurrency for multipart uploads. @@ -102,7 +113,7 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --leave-parts-on-error - If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. It should be set to true for resuming uploads across different sessions. @@ -110,6 +121,16 @@ DESCRIPTION: additional costs if not cleaned up. + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. 
+ This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. @@ -145,7 +166,7 @@ DESCRIPTION: | | None --sse-kms-key-id - if using using your own master key in vault, this header specifies the + if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. @@ -162,6 +183,9 @@ DESCRIPTION: | | None | AES256 | AES256 + --description + Description of the remote. + OPTIONS: --compartment value Object storage compartment OCID [$COMPARTMENT] @@ -172,18 +196,21 @@ OPTIONS: Advanced + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] - --sse-kms-key-id value if using using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] --storage-tier value The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] @@ -201,7 +228,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/oos/user_principal_auth.md b/docs/en/cli-reference/storage/create/oos/user_principal_auth.md index 0767d4b1..ff7103df 100644 --- a/docs/en/cli-reference/storage/create/oos/user_principal_auth.md +++ b/docs/en/cli-reference/storage/create/oos/user_principal_auth.md @@ -57,9 +57,8 @@ DESCRIPTION: Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown - size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google - photos or google docs) they will be uploaded as multipart uploads - using this chunk size. + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. @@ -80,6 +79,18 @@ DESCRIPTION: statistics displayed with "-P" flag. + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + --upload-concurrency Concurrency for multipart uploads. @@ -118,7 +129,7 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --leave-parts-on-error - If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. It should be set to true for resuming uploads across different sessions. @@ -126,6 +137,16 @@ DESCRIPTION: additional costs if not cleaned up. + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. + This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. 
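For orientation, a hypothetical invocation combining the new OOS multipart options documented above might look like the sketch below. Every identifier is a placeholder (namespace, compartment OCID, region, storage name), the env_auth variant is chosen only because it needs the fewest credential flags, and the flag names are taken from the option descriptions above.

```
# Sketch only - all identifiers are placeholders for your own tenancy.
# attempt_resume_upload can only skip already-uploaded parts when
# leave_parts_on_error is also enabled, as noted in the descriptions above.
singularity storage create oos env_auth \
  --namespace example-namespace \
  --compartment ocid1.compartment.oc1..exampleuniqueID \
  --region us-ashburn-1 \
  --chunk-size 64Mi \
  --leave-parts-on-error \
  --attempt-resume-upload \
  --name oos-resumable
```

Raising --chunk-size as in this sketch trades memory for a larger streamable file size: at the 10,000-part limit, 64 MiB chunks allow roughly 625 GiB per streamed upload versus about 48 GiB with the default 5 MiB chunks.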
@@ -161,7 +182,7 @@ DESCRIPTION: | | None --sse-kms-key-id - if using using your own master key in vault, this header specifies the + if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. @@ -178,6 +199,9 @@ DESCRIPTION: | | None | AES256 | AES256 + --description + Description of the remote. + OPTIONS: --compartment value Object storage compartment OCID [$COMPARTMENT] @@ -190,18 +214,21 @@ OPTIONS: Advanced + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] - --sse-kms-key-id value if using using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] --storage-tier value The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] @@ -219,7 +246,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/oos/workload_identity_auth.md b/docs/en/cli-reference/storage/create/oos/workload_identity_auth.md new file mode 100644 index 00000000..a193b51a --- /dev/null +++ b/docs/en/cli-reference/storage/create/oos/workload_identity_auth.md @@ -0,0 +1,250 @@ +# use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM). +https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create oos workload_identity_auth - use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM). + https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm + +USAGE: + singularity storage create oos workload_identity_auth [command options] + +DESCRIPTION: + --namespace + Object storage namespace + + --compartment + Object storage compartment OCID + + --region + Object storage Region + + --endpoint + Endpoint for Object storage API. + + Leave blank to use the default endpoint for the region. + + --storage-tier + The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm + + Examples: + | Standard | Standard storage tier, this is the default tier + | InfrequentAccess | InfrequentAccess storage tier + | Archive | Archive storage tier + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. + + Note that "upload_concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. + + + --max-upload-parts + Maximum number of parts in a multipart upload. 
+ + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --upload-concurrency + Concurrency for multipart uploads. + + This is the number of chunks of the same file that are uploaded + concurrently. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --copy-timeout + Timeout for copy. + + Copy is an asynchronous operation, specify timeout to wait for copy to succeed + + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --leave-parts-on-error + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. + + It should be set to true for resuming uploads across different sessions. + + WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add + additional costs if not cleaned up. + + + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. + This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. + + + --sse-customer-key-file + To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated + with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.' + + Examples: + | | None + + --sse-customer-key + To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to + encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is + needed. For more information, see Using Your Own Keys for Server-Side Encryption + (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm) + + Examples: + | | None + + --sse-customer-key-sha256 + If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption + key. This value is used to check the integrity of the encryption key. 
see Using Your Own Keys for + Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm). + + Examples: + | | None + + --sse-kms-key-id + if using your own master key in vault, this header specifies the + OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call + the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. + Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. + + Examples: + | | None + + --sse-customer-algorithm + If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. + Object Storage supports "AES256" as the encryption algorithm. For more information, see + Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm). + + Examples: + | | None + | AES256 | AES256 + + --description + Description of the remote. + + +OPTIONS: + --compartment value Object storage compartment OCID [$COMPARTMENT] + --endpoint value Endpoint for Object storage API. [$ENDPOINT] + --help, -h show help + --namespace value Object storage namespace [$NAMESPACE] + --region value Object storage Region [$REGION] + + Advanced + + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] + --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] + --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --storage-tier value The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] + --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/opendrive.md b/docs/en/cli-reference/storage/create/opendrive.md index 90f363b0..122ee75f 100644 --- a/docs/en/cli-reference/storage/create/opendrive.md +++ b/docs/en/cli-reference/storage/create/opendrive.md @@ -26,6 +26,9 @@ DESCRIPTION: Note that these chunks are buffered in memory so increasing them will increase memory use. + --description + Description of the remote. + OPTIONS: --help, -h show help @@ -34,8 +37,9 @@ OPTIONS: Advanced - --chunk-size value Files will be uploaded in chunks this size. (default: "10Mi") [$CHUNK_SIZE] - --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot") [$ENCODING] + --chunk-size value Files will be uploaded in chunks this size. (default: "10Mi") [$CHUNK_SIZE] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. 
(default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot") [$ENCODING] Client Config @@ -50,7 +54,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/pcloud.md b/docs/en/cli-reference/storage/create/pcloud.md index de5f99d5..8b362caa 100644 --- a/docs/en/cli-reference/storage/create/pcloud.md +++ b/docs/en/cli-reference/storage/create/pcloud.md @@ -62,6 +62,9 @@ DESCRIPTION: --password Your pcloud password. + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -71,6 +74,7 @@ OPTIONS: Advanced --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --hostname value Hostname to connect to. (default: "api.pcloud.com") [$HOSTNAME] --password value Your pcloud password. [$PASSWORD] @@ -92,7 +96,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/premiumizeme.md b/docs/en/cli-reference/storage/create/premiumizeme.md index de7a275d..2f634dd4 100644 --- a/docs/en/cli-reference/storage/create/premiumizeme.md +++ b/docs/en/cli-reference/storage/create/premiumizeme.md @@ -9,6 +9,29 @@ USAGE: singularity storage create premiumizeme [command options] DESCRIPTION: + --client-id + OAuth Client Id. + + Leave blank normally. + + --client-secret + OAuth Client Secret. + + Leave blank normally. + + --token + OAuth Access Token as a JSON blob. + + --auth-url + Auth server URL. + + Leave blank to use the provider defaults. + + --token-url + Token server url. + + Leave blank to use the provider defaults. + --api-key API Key. @@ -20,14 +43,23 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: - --api-key value API Key. [$API_KEY] - --help, -h show help + --api-key value API Key. [$API_KEY] + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] + --help, -h show help Advanced - --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. 
[$TOKEN_URL] Client Config @@ -42,7 +74,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/putio.md b/docs/en/cli-reference/storage/create/putio.md index 10ca4698..1e90b249 100644 --- a/docs/en/cli-reference/storage/create/putio.md +++ b/docs/en/cli-reference/storage/create/putio.md @@ -9,18 +9,50 @@ USAGE: singularity storage create putio [command options] DESCRIPTION: + --client-id + OAuth Client Id. + + Leave blank normally. + + --client-secret + OAuth Client Secret. + + Leave blank normally. + + --token + OAuth Access Token as a JSON blob. + + --auth-url + Auth server URL. + + Leave blank to use the provider defaults. + + --token-url + Token server url. + + Leave blank to use the provider defaults. + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: - --help, -h show help + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] + --help, -h show help Advanced - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config @@ -35,7 +67,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/qingstor.md b/docs/en/cli-reference/storage/create/qingstor.md index d8af3378..bb7eea6f 100644 --- a/docs/en/cli-reference/storage/create/qingstor.md +++ b/docs/en/cli-reference/storage/create/qingstor.md @@ -85,6 +85,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --access-key-id value QingStor Access Key ID. [$ACCESS_KEY_ID] @@ -98,6 +101,7 @@ OPTIONS: --chunk-size value Chunk size to use for uploading. (default: "4Mi") [$CHUNK_SIZE] --connection-retries value Number of connection retries. (default: 3) [$CONNECTION_RETRIES] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Ctl,InvalidUtf8") [$ENCODING] --upload-concurrency value Concurrency for multipart uploads. (default: 1) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] @@ -115,7 +119,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/README.md b/docs/en/cli-reference/storage/create/s3/README.md index 3e387b2c..d4906375 100644 --- a/docs/en/cli-reference/storage/create/s3/README.md +++ b/docs/en/cli-reference/storage/create/s3/README.md @@ -1,9 +1,9 @@ -# Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi +# Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others {% code fullWidth="true" %} ``` NAME: - singularity storage create s3 - Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi + singularity storage create s3 - Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others USAGE: singularity storage create s3 command [command options] @@ -17,21 +17,28 @@ COMMANDS: cloudflare Cloudflare R2 Storage digitalocean DigitalOcean Spaces dreamhost Dreamhost DreamObjects + gcs Google Cloud Storage huaweiobs Huawei Object Storage Service ibmcos IBM COS S3 idrive IDrive e2 ionos IONOS Cloud + leviia Leviia Object Storage liara Liara Object Storage + linode Linode Object Storage lyvecloud Seagate Lyve Cloud + magalu Magalu Object Storage minio Minio Object Storage netease Netease Object Storage (NOS) other Any other S3 compatible provider + petabox Petabox Object Storage qiniu Qiniu Object Storage (Kodo) rackcorp RackCorp Object Storage + rclone Rclone S3 Server scaleway Scaleway Object Storage seaweedfs SeaweedFS S3 stackpath StackPath Object Storage storj Storj (S3 Compatible Gateway) + synology Synology C2 Object Storage tencentcos Tencent Cloud Object Storage (COS) wasabi Wasabi Object Storage help, h Shows a list of commands or help for one command diff --git a/docs/en/cli-reference/storage/create/s3/alibaba.md b/docs/en/cli-reference/storage/create/s3/alibaba.md index ee3a0a24..4e3dc771 100644 --- a/docs/en/cli-reference/storage/create/s3/alibaba.md +++ b/docs/en/cli-reference/storage/create/s3/alibaba.md @@ -196,10 +196,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. 
+ Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -216,6 +216,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -225,6 +229,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -309,13 +318,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -333,12 +339,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -366,6 +389,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -400,9 +434,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. 
+ + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -415,36 +522,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. 
(default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. 
(no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -459,7 +575,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/arvancloud.md b/docs/en/cli-reference/storage/create/s3/arvancloud.md index 31470a5f..06077910 100644 --- a/docs/en/cli-reference/storage/create/s3/arvancloud.md +++ b/docs/en/cli-reference/storage/create/s3/arvancloud.md @@ -32,9 +32,9 @@ DESCRIPTION: Endpoint for Arvan Cloud Object Storage (AOS) API. Examples: - | s3.ir-thr-at1.arvanstorage.com | The default endpoint - a good choice if you are unsure. 
- | | Tehran Iran (Asiatech) - | s3.ir-tbz-sh1.arvanstorage.com | Tabriz Iran (Shahriar) + | s3.ir-thr-at1.arvanstorage.ir | The default endpoint - a good choice if you are unsure. + | | Tehran Iran (Simin) + | s3.ir-tbz-sh1.arvanstorage.ir | Tabriz Iran (Shahriar) --location-constraint Location constraint - must match endpoint. @@ -42,7 +42,7 @@ DESCRIPTION: Used when creating buckets only. Examples: - | ir-thr-at1 | Tehran Iran (Asiatech) + | ir-thr-at1 | Tehran Iran (Simin) | ir-tbz-sh1 | Tabriz Iran (Shahriar) --acl @@ -180,10 +180,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -200,6 +200,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -209,6 +213,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -293,13 +302,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -317,12 +323,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -350,6 +373,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. 
+ + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -384,9 +418,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -400,36 +507,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. 
(default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. 
(default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] Client Config @@ -444,7 +560,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/aws.md b/docs/en/cli-reference/storage/create/s3/aws.md index 3cb09d3b..2a8a2120 100644 --- a/docs/en/cli-reference/storage/create/s3/aws.md +++ b/docs/en/cli-reference/storage/create/s3/aws.md @@ -71,6 +71,8 @@ DESCRIPTION: | | Needs location constraint ap-east-1. | sa-east-1 | South America (Sao Paulo) Region. | | Needs location constraint sa-east-1. + | il-central-1 | Israel (Tel Aviv) Region. + | | Needs location constraint il-central-1. | me-south-1 | Middle East (Bahrain) Region. | | Needs location constraint me-south-1. | af-south-1 | Africa (Cape Town) Region. @@ -114,6 +116,7 @@ DESCRIPTION: | ap-south-1 | Asia Pacific (Mumbai) Region | ap-east-1 | Asia Pacific (Hong Kong) Region | sa-east-1 | South America (Sao Paulo) Region + | il-central-1 | Israel (Tel Aviv) Region | me-south-1 | Middle East (Bahrain) Region | af-south-1 | Africa (Cape Town) Region | cn-north-1 | China (Beijing) Region @@ -313,10 +316,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -333,6 +336,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -342,6 +349,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-accelerate-endpoint If true use the AWS S3 accelerated endpoint. @@ -439,13 +451,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -463,12 +472,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. 
+ --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -496,6 +522,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -530,14 +567,87 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata --sts-endpoint - Endpoint for STS. + Endpoint for STS (deprecated). Leave blank if using AWS to use the default endpoint for the region. + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -554,44 +664,53 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --requester-pays Enables requester pays option when interacting with S3 bucket. (default: false) [$REQUESTER_PAYS] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. 
[$SSE_CUSTOMER_ALGORITHM] - --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] - --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] - --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] - --sts-endpoint value Endpoint for STS. [$STS_ENDPOINT] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-accelerate-endpoint If true use the AWS S3 accelerated endpoint. (default: false) [$USE_ACCELERATE_ENDPOINT] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. 
(default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --requester-pays Enables requester pays option when interacting with S3 bucket. (default: false) [$REQUESTER_PAYS] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] + --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] + --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] + --sts-endpoint value Endpoint for STS (deprecated). [$STS_ENDPOINT] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accelerate-endpoint If true use the AWS S3 accelerated endpoint. (default: false) [$USE_ACCELERATE_ENDPOINT] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] Client Config @@ -606,7 +725,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/ceph.md b/docs/en/cli-reference/storage/create/s3/ceph.md index ca575e2d..c3d3e785 100644 --- a/docs/en/cli-reference/storage/create/s3/ceph.md +++ b/docs/en/cli-reference/storage/create/s3/ceph.md @@ -224,10 +224,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -244,6 +244,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -253,6 +257,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -337,13 +346,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -361,12 +367,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. 
+ + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -394,6 +417,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -428,9 +462,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -446,40 +553,49 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. 
(default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] - --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] - --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] - --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. 
(default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] + --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] + --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. 
(default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -494,7 +610,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/chinamobile.md b/docs/en/cli-reference/storage/create/s3/chinamobile.md index 24ccf641..eb477b95 100644 --- a/docs/en/cli-reference/storage/create/s3/chinamobile.md +++ b/docs/en/cli-reference/storage/create/s3/chinamobile.md @@ -278,10 +278,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -298,6 +298,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -307,6 +311,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -391,13 +400,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. 
+ Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -415,12 +421,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -448,6 +471,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -482,9 +516,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. 
+ + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -499,40 +606,49 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] - --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. 
[$SSE_CUSTOMER_KEY] - --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] - --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. 
[$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] + --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] + --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -547,7 +663,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/cloudflare.md b/docs/en/cli-reference/storage/create/s3/cloudflare.md index 6312305d..0575fe6d 100644 --- a/docs/en/cli-reference/storage/create/s3/cloudflare.md +++ b/docs/en/cli-reference/storage/create/s3/cloudflare.md @@ -154,10 +154,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. 
If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -174,6 +174,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -183,6 +187,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -267,13 +276,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -291,12 +297,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -324,6 +347,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -358,9 +392,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. 
+ + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -372,36 +479,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. 
(default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. 
(no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -416,7 +532,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/digitalocean.md b/docs/en/cli-reference/storage/create/s3/digitalocean.md index 85e5de55..cc84b3c8 100644 --- a/docs/en/cli-reference/storage/create/s3/digitalocean.md +++ b/docs/en/cli-reference/storage/create/s3/digitalocean.md @@ -186,10 +186,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. 
If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -206,6 +206,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -215,6 +219,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -299,13 +308,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -323,12 +329,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -356,6 +379,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -390,9 +424,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. 
+ + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -406,36 +513,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. 
(default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. 
(no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -450,7 +566,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/dreamhost.md b/docs/en/cli-reference/storage/create/s3/dreamhost.md index 16f8aafe..e7913d4f 100644 --- a/docs/en/cli-reference/storage/create/s3/dreamhost.md +++ b/docs/en/cli-reference/storage/create/s3/dreamhost.md @@ -181,10 +181,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. 
If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -201,6 +201,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -210,6 +214,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -294,13 +303,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -318,12 +324,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -351,6 +374,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -385,9 +419,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. 
+ + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -401,36 +508,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. 
(default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. 
(no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -445,7 +561,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/gcs.md b/docs/en/cli-reference/storage/create/s3/gcs.md new file mode 100644 index 00000000..4a819c7b --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/gcs.md @@ -0,0 +1,579 @@ +# Google Cloud Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 gcs - Google Cloud Storage + +USAGE: + singularity storage create s3 gcs [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. 
+ + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | | Use this if unsure. + | | Will use v4 signatures and an empty region. + | other-v2-signature | Use this only if v4 signatures don't work. + | | E.g. pre Jewel/v10 CEPH. + + --endpoint + Endpoint for Google Cloud Storage. + + Examples: + | https://storage.googleapis.com | Google Cloud Storage endpoint + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. 
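+
+     As a rough worked example of the limit just described: with the default
+     5 MiB chunks, 10,000 parts cover about 48 GiB, so streaming an object on
+     the order of 1 TiB needs a chunk_size of at least 1 TiB / 10,000
+     (roughly 105 MiB). An illustrative invocation (the storage name and the
+     128Mi value are placeholders, not defaults) might be:
+
+       # 128Mi x 10,000 parts allows streaming objects of up to ~1.22 TiB
+       singularity storage create s3 gcs --name my-gcs --chunk-size 128Mi
+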
+ + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. 
+ + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. 
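+
+     For instance, if downloads from a provider that transparently gzips
+     objects fail with the "sizes differ" error shown above, one illustrative
+     workaround (the storage name is a placeholder) is to override the
+     provider default when creating the storage:
+
+       # --might-gzip true handles provider-gzipped downloads; --decompress
+       # additionally unpacks objects uploaded with Content-Encoding: gzip
+       singularity storage create s3 gcs --name gzip-prone --might-gzip true --decompress
+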
+ + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for Google Cloud Storage. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. 
[$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. 
(default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/huaweiobs.md b/docs/en/cli-reference/storage/create/s3/huaweiobs.md index ecde891c..fdc201d4 100644 --- a/docs/en/cli-reference/storage/create/s3/huaweiobs.md +++ b/docs/en/cli-reference/storage/create/s3/huaweiobs.md @@ -198,10 +198,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -218,6 +218,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -227,6 +231,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -311,13 +320,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. 
--memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -335,12 +341,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -368,6 +391,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -402,9 +436,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). 
+ + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -417,36 +524,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. 
[$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. 
(default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -461,7 +577,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/ibmcos.md b/docs/en/cli-reference/storage/create/s3/ibmcos.md index 3bd95f04..0c2d08d4 100644 --- a/docs/en/cli-reference/storage/create/s3/ibmcos.md +++ b/docs/en/cli-reference/storage/create/s3/ibmcos.md @@ -291,10 +291,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -311,6 +311,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -320,6 +324,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -404,13 +413,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. 
- - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -428,12 +434,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -461,6 +484,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -495,9 +529,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. 
Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -511,36 +618,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. 
(default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -555,7 +671,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/idrive.md b/docs/en/cli-reference/storage/create/s3/idrive.md index 2b60185c..e13e3639 100644 --- a/docs/en/cli-reference/storage/create/s3/idrive.md +++ b/docs/en/cli-reference/storage/create/s3/idrive.md @@ -157,10 +157,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -177,6 +177,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -186,6 +190,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -270,13 +279,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. 
- This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -294,12 +300,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -327,6 +350,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -361,9 +395,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -374,36 +481,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -418,7 +534,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/ionos.md b/docs/en/cli-reference/storage/create/s3/ionos.md index c14987fe..ee3eef61 100644 --- a/docs/en/cli-reference/storage/create/s3/ionos.md +++ b/docs/en/cli-reference/storage/create/s3/ionos.md @@ -176,10 +176,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -196,6 +196,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -205,6 +209,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -289,13 +298,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. 
(no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -313,12 +319,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -346,6 +369,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -380,9 +414,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
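       For illustration only (a hypothetical invocation, not output of the reference
       generator): the provider-detection tristates documented above and below are
       normally left unset so rclone can pick the right behaviour per provider, but
       they could be pinned explicitly at creation time, assuming the flag forms
       listed under OPTIONS below:

           # hypothetical example; flag values are illustrative, not recommendations
           singularity storage create s3 ionos \
               --name my-ionos \
               --use-already-exists true \
               --use-multipart-uploads true \
               --sdk-log-mode "Signing,Retries"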
+ + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -395,36 +502,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -439,7 +555,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/leviia.md b/docs/en/cli-reference/storage/create/s3/leviia.md new file mode 100644 index 00000000..6dd29ed2 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/leviia.md @@ -0,0 +1,572 @@ +# Leviia Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 leviia - Leviia Object Storage + +USAGE: + singularity storage create s3 leviia [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | | Use this if unsure. + | | Will use v4 signatures and an empty region. + | other-v2-signature | Use this only if v4 signatures don't work. + | | E.g. pre Jewel/v10 CEPH. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. 
+ + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. 
+ + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. 
+ + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. 
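       For illustration, a hypothetical Leviia storage that includes old versions and
       delete markers in listings (using only the version flags documented here and
       just below) might be created as follows; the name and values are arbitrary
       examples:

           # hypothetical example: include old versions and delete markers in listings
           singularity storage create s3 leviia \
               --name leviia-archive \
               --versions \
               --version-deleted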
+ + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. 
Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. 
(default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/liara.md b/docs/en/cli-reference/storage/create/s3/liara.md index 3a6258af..e814d642 100644 --- a/docs/en/cli-reference/storage/create/s3/liara.md +++ b/docs/en/cli-reference/storage/create/s3/liara.md @@ -170,10 +170,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -190,6 +190,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -199,6 +203,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -283,13 +292,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. 
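       For illustration, the --upload-concurrency behaviour documented earlier in this
       file could, hypothetically, be tuned together with the related chunking flags
       when creating a Liara storage; every flag used below appears in this file's
       OPTIONS list, and the values are arbitrary examples rather than recommendations:

           # hypothetical example; larger chunks and more concurrency for big files on fast links
           singularity storage create s3 liara \
               --name liara-fast \
               --upload-concurrency 8 \
               --chunk-size 64Mi \
               --upload-cutoff 200Mi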
@@ -307,12 +313,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -340,6 +363,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -374,9 +408,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -389,36 +496,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. 
(default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -433,7 +549,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/linode.md b/docs/en/cli-reference/storage/create/s3/linode.md new file mode 100644 index 00000000..7ac98707 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/linode.md @@ -0,0 +1,570 @@ +# Linode Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 linode - Linode Object Storage + +USAGE: + singularity storage create s3 linode [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --endpoint + Endpoint for Linode Object Storage API. + + Examples: + | us-southeast-1.linodeobjects.com | Atlanta, GA (USA), us-southeast-1 + | us-ord-1.linodeobjects.com | Chicago, IL (USA), us-ord-1 + | eu-central-1.linodeobjects.com | Frankfurt (Germany), eu-central-1 + | it-mil-1.linodeobjects.com | Milan (Italy), it-mil-1 + | us-east-1.linodeobjects.com | Newark, NJ (USA), us-east-1 + | fr-par-1.linodeobjects.com | Paris (France), fr-par-1 + | us-sea-1.linodeobjects.com | Seattle, WA (USA), us-sea-1 + | ap-south-1.linodeobjects.com | Singapore ap-south-1 + | se-sto-1.linodeobjects.com | Stockholm (Sweden), se-sto-1 + | us-iad-1.linodeobjects.com | Washington, DC, (USA), us-iad-1 + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. 
+ + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. 
+ + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. 
+ + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. 
+ + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. 
Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for Linode Object Storage API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/lyvecloud.md b/docs/en/cli-reference/storage/create/s3/lyvecloud.md index b9083b1c..c2f8799c 100644 --- a/docs/en/cli-reference/storage/create/s3/lyvecloud.md +++ b/docs/en/cli-reference/storage/create/s3/lyvecloud.md @@ -183,10 +183,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -203,6 +203,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -212,6 +216,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -296,13 +305,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. 
@@ -320,12 +326,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -353,6 +376,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -387,9 +421,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -403,36 +510,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. 
(default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -447,7 +563,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/magalu.md b/docs/en/cli-reference/storage/create/s3/magalu.md new file mode 100644 index 00000000..a0ba72d0 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/magalu.md @@ -0,0 +1,571 @@ +# Magalu Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 magalu - Magalu Object Storage + +USAGE: + singularity storage create s3 magalu [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | br-se1.magaluobjects.com | Magalu BR Southeast 1 endpoint + | br-ne1.magaluobjects.com | Magalu BR Northeast 1 endpoint + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. 
+ + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --storage-class + The storage class to use when storing new objects in Magalu. + + Examples: + | STANDARD | Standard storage class + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. 
+ + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. 
+ + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. 
+ + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + --storage-class value The storage class to use when storing new objects in Magalu. [$STORAGE_CLASS] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/minio.md b/docs/en/cli-reference/storage/create/s3/minio.md index 85af6a18..c09affbd 100644 --- a/docs/en/cli-reference/storage/create/s3/minio.md +++ b/docs/en/cli-reference/storage/create/s3/minio.md @@ -224,10 +224,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -244,6 +244,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -253,6 +257,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -337,13 +346,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. 
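# Editorial sketch, not generated output: the hunks in this file document the new
# --sdk-log-mode flag among others. Assuming the usual "singularity storage create
# s3 minio" command form with a placeholder name, endpoint and credentials, SDK
# logging of requests and responses could be enabled roughly like this (the flag
# description notes that -vv verbosity is also needed to surface the debug output):
singularity storage create s3 minio \
    --name my-minio \
    --endpoint http://127.0.0.1:9000 \
    --access-key-id AKIAEXAMPLE \
    --secret-access-key SECRETEXAMPLE \
    --sdk-log-mode "Request,Response"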
@@ -361,12 +367,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -394,6 +417,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -428,9 +462,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -446,40 +553,49 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] - --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] - --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. 
[$SSE_CUSTOMER_KEY_BASE64] - --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. 
[$SHARED_CREDENTIALS_FILE] + --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] + --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] + --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -494,7 +610,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/netease.md b/docs/en/cli-reference/storage/create/s3/netease.md index af7c312f..a8ccaf88 100644 --- a/docs/en/cli-reference/storage/create/s3/netease.md +++ b/docs/en/cli-reference/storage/create/s3/netease.md @@ -178,10 +178,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -198,6 +198,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. 
has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -207,6 +211,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -291,13 +300,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -315,12 +321,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -348,6 +371,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -382,9 +416,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -398,36 +505,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. 
(default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -442,7 +558,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/other.md b/docs/en/cli-reference/storage/create/s3/other.md index fa36d1c3..00462e42 100644 --- a/docs/en/cli-reference/storage/create/s3/other.md +++ b/docs/en/cli-reference/storage/create/s3/other.md @@ -178,10 +178,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -198,6 +198,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. 
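# Editorial sketch, not generated output: tying the path-style note above to a
# concrete command. Endpoint, credentials, storage name and bucket path are all
# placeholders; --path is assumed to take a bucket/prefix form, and path-style
# access (the default for this backend) is kept because the bucket name contains '.'.
singularity storage create s3 other \
    --name my-s3-compatible \
    --endpoint https://s3.example.internal \
    --access-key-id AKIAEXAMPLE \
    --secret-access-key SECRETEXAMPLE \
    --force-path-style \
    --path archive.v1/data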
+ --v2-auth If true use v2 authentication. @@ -207,6 +211,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -291,13 +300,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -315,12 +321,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -348,6 +371,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -382,9 +416,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -398,36 +505,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. 
(default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -442,7 +558,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/petabox.md b/docs/en/cli-reference/storage/create/s3/petabox.md new file mode 100644 index 00000000..1fc88e0c --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/petabox.md @@ -0,0 +1,580 @@ +# Petabox Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 petabox - Petabox Object Storage + +USAGE: + singularity storage create s3 petabox [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. 
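# Editorial sketch, not part of the generated reference: creating a Petabox-backed
# storage with explicit credentials. Storage name and key values are placeholders;
# the region/endpoint pairing follows the examples listed just below. Alternatively,
# the keys can be left blank and --env-auth used to pick up runtime credentials,
# as described above.
singularity storage create s3 petabox \
    --name my-petabox \
    --region us-east-1 \
    --endpoint s3.petabox.io \
    --access-key-id AKIAEXAMPLE \
    --secret-access-key SECRETEXAMPLE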
+ + --region + Region where your bucket will be created and your data stored. + + + Examples: + | us-east-1 | US East (N. Virginia) + | eu-central-1 | Europe (Frankfurt) + | ap-southeast-1 | Asia Pacific (Singapore) + | me-south-1 | Middle East (Bahrain) + | sa-east-1 | South America (São Paulo) + + --endpoint + Endpoint for Petabox S3 Object Storage. + + Specify the endpoint from the same region. + + Examples: + | s3.petabox.io | US East (N. Virginia) + | s3.us-east-1.petabox.io | US East (N. Virginia) + | s3.eu-central-1.petabox.io | Europe (Frankfurt) + | s3.ap-southeast-1.petabox.io | Asia Pacific (Singapore) + | s3.me-south-1.petabox.io | Middle East (Bahrain) + | s3.sa-east-1.petabox.io | South America (São Paulo) + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. 
+ A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. 
+ Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. 
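# Editorial sketch, not generator output: combining the transaction-reducing flags
# described above when the target bucket is known to exist already. Name, region,
# endpoint and path are placeholders; note the reference warns that --no-head
# increases the chance of undetected upload failures.
singularity storage create s3 petabox \
    --name my-petabox-ingest \
    --region eu-central-1 \
    --endpoint s3.eu-central-1.petabox.io \
    --no-check-bucket \
    --no-head \
    --path existing-bucket/ingest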
+ + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. 
+ + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for Petabox S3 Object Storage. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region where your bucket will be created and your data stored. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. 
(default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/qiniu.md b/docs/en/cli-reference/storage/create/s3/qiniu.md index ab20a5a0..f8b38875 100644 --- a/docs/en/cli-reference/storage/create/s3/qiniu.md +++ b/docs/en/cli-reference/storage/create/s3/qiniu.md @@ -212,10 +212,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -232,6 +232,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -241,6 +245,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -325,13 +334,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. 
- This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -349,12 +355,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -382,6 +405,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -416,9 +450,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -433,36 +540,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -477,7 +593,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/rackcorp.md b/docs/en/cli-reference/storage/create/s3/rackcorp.md index a9c89fef..cf48c86d 100644 --- a/docs/en/cli-reference/storage/create/s3/rackcorp.md +++ b/docs/en/cli-reference/storage/create/s3/rackcorp.md @@ -231,10 +231,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -251,6 +251,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -260,6 +264,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -344,13 +353,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. 
(no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -368,12 +374,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -401,6 +424,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -435,9 +469,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
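+
+      Related to the `--s3-no-check-bucket` flag mentioned above (exposed here
+      as `--no-check-bucket` in the OPTIONS list): when the bucket is known to
+      exist, the existence check and creation attempt can be skipped. The
+      sketch below is illustrative only; the subcommand name is assumed from
+      this page's path, and the storage name and bucket path are placeholders.
+
+          singularity storage create s3 rackcorp \
+              --no-check-bucket \
+              --name my-rackcorp \
+              --path existing-bucket/data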
+ + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -451,36 +558,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -495,7 +611,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/rclone.md b/docs/en/cli-reference/storage/create/s3/rclone.md new file mode 100644 index 00000000..bca8fbf5 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/rclone.md @@ -0,0 +1,578 @@ +# Rclone S3 Server + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 rclone - Rclone S3 Server + +USAGE: + singularity storage create s3 rclone [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | | Use this if unsure. + | | Will use v4 signatures and an empty region. + | other-v2-signature | Use this only if v4 signatures don't work. + | | E.g. pre Jewel/v10 CEPH. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. 
+ + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. 
This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. 
If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. 
+ + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. 
+ + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. 
(default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/scaleway.md b/docs/en/cli-reference/storage/create/s3/scaleway.md index 09009e82..6d0dfc16 100644 --- a/docs/en/cli-reference/storage/create/s3/scaleway.md +++ b/docs/en/cli-reference/storage/create/s3/scaleway.md @@ -85,11 +85,16 @@ DESCRIPTION: The storage class to use when storing new objects in S3. Examples: - | | Default. - | STANDARD | The Standard class for any upload. - | | Suitable for on-demand content like streaming or CDN. - | GLACIER | Archived storage. - | | Prices are lower, but it needs to be restored first to be accessed. + | | Default. + | STANDARD | The Standard class for any upload. + | | Suitable for on-demand content like streaming or CDN. + | | Available in all regions. + | GLACIER | Archived storage. + | | Prices are lower, but it needs to be restored first to be accessed. + | | Available in FR-PAR and NL-AMS regions. + | ONEZONE_IA | One Zone - Infrequent Access. + | | A good choice for storing secondary backup copies or easily re-creatable data. + | | Available in the FR-PAR region only. --upload-cutoff Cutoff for switching to chunked upload. @@ -183,10 +188,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -203,6 +208,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -212,6 +221,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -296,13 +310,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -320,12 +331,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -353,6 +381,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -387,9 +426,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. 
+ + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -403,36 +515,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. 
(default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -447,7 +568,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/seaweedfs.md b/docs/en/cli-reference/storage/create/s3/seaweedfs.md index b4c8b1b2..7358220e 100644 --- a/docs/en/cli-reference/storage/create/s3/seaweedfs.md +++ b/docs/en/cli-reference/storage/create/s3/seaweedfs.md @@ -181,10 +181,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -201,6 +201,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. 
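# Illustrative sketch (not part of the generated reference): the multipart
# flags described above can be combined when creating an S3-compatible
# storage. The storage name, endpoint and values below are placeholders,
# and the provider name follows the file path of this reference page.
#
#   singularity storage create s3 seaweedfs \
#       --name my-seaweedfs \
#       --endpoint localhost:8333 \
#       --upload-concurrency 8 \
#       --chunk-size 64Mi \
#       --upload-cutoff 200Mi
#
# Per the descriptions above, raising --upload-concurrency only helps when a
# small number of large uploads does not already saturate the link, and each
# transfer buffers roughly concurrency x chunk-size in memory.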
@@ -210,6 +214,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -294,13 +303,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -318,12 +324,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -351,6 +374,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -385,9 +419,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -401,36 +508,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. 
(default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -445,7 +561,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/stackpath.md b/docs/en/cli-reference/storage/create/s3/stackpath.md index 02d0db9f..bcb5ccd0 100644 --- a/docs/en/cli-reference/storage/create/s3/stackpath.md +++ b/docs/en/cli-reference/storage/create/s3/stackpath.md @@ -176,10 +176,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -196,6 +196,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. 
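# Illustrative sketch: the note above says path-style access is required when
# the bucket name is not a valid DNS name (for example it contains '.').
# Flag spellings follow the OPTIONS list; the endpoint, storage name and
# bucket path are placeholders.
#
#   singularity storage create s3 stackpath \
#       --name dotted-bucket-store \
#       --endpoint s3.us-east-2.stackpathstorage.com \
#       --force-path-style \
#       --path my.bucket.name/prefix
#
# The same setting can also be supplied through the FORCE_PATH_STYLE
# environment variable shown next to the flag in the OPTIONS list.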
+ --v2-auth If true use v2 authentication. @@ -205,6 +209,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -289,13 +298,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -313,12 +319,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -346,6 +369,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -380,9 +414,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -395,36 +502,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. 
(default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -439,7 +555,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/storj.md b/docs/en/cli-reference/storage/create/s3/storj.md index fecd76ae..689b4bd6 100644 --- a/docs/en/cli-reference/storage/create/s3/storj.md +++ b/docs/en/cli-reference/storage/create/s3/storj.md @@ -149,10 +149,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -169,6 +169,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. 
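# Illustrative sketch: --sdk-log-mode, documented above, accepts a
# comma-separated list of SDK log categories, or "All"/"Off". The storage
# name and the chosen categories below are placeholders.
#
#   singularity storage create s3 storj \
#       --name debug-storj \
#       --sdk-log-mode Request,Response,Retries
#
# As the description notes, these SDK messages are emitted at debug level,
# so the client's verbose/debug logging has to be enabled to see them.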
+ --v2-auth If true use v2 authentication. @@ -178,6 +182,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -262,13 +271,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -286,12 +292,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -319,6 +342,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -353,9 +387,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -366,36 +473,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. 
(default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. 
(default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -410,7 +526,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/synology.md b/docs/en/cli-reference/storage/create/s3/synology.md new file mode 100644 index 00000000..358f2fa8 --- /dev/null +++ b/docs/en/cli-reference/storage/create/s3/synology.md @@ -0,0 +1,568 @@ +# Synology C2 Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage create s3 synology - Synology C2 Object Storage + +USAGE: + singularity storage create s3 synology [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). 
+ + Leave blank for anonymous access or runtime credentials. + + --region + Region where your data stored. + + + Examples: + | eu-001 | Europe Region 1 + | eu-002 | Europe Region 2 + | us-001 | US Region 1 + | us-002 | US Region 2 + | tw-001 | Asia (Taiwan) + + --endpoint + Endpoint for Synology C2 Object Storage API. + + Examples: + | eu-001.s3.synologyc2.net | EU Endpoint 1 + | eu-002.s3.synologyc2.net | EU Endpoint 2 + | us-001.s3.synologyc2.net | US Endpoint 1 + | us-002.s3.synologyc2.net | US Endpoint 2 + | tw-001.s3.synologyc2.net | TW Endpoint 1 + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. 
+ + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. 
+ + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. 
+ + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. 
+ + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for Synology C2 Object Storage API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region where your data stored. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. 
(default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. 
key=value) + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) + + General + + --name value Name of the storage (default: Auto generated) + --path value Path of the storage + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/create/s3/tencentcos.md b/docs/en/cli-reference/storage/create/s3/tencentcos.md index d2767d7b..9ecc4ddf 100644 --- a/docs/en/cli-reference/storage/create/s3/tencentcos.md +++ b/docs/en/cli-reference/storage/create/s3/tencentcos.md @@ -194,10 +194,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -214,6 +214,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -223,6 +227,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -307,13 +316,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. 
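# --- Hypothetical usage sketch -----------------------------------------
# The advanced flags above are easiest to see in context. Assuming the
# `singularity storage create s3 tencentcos` form used elsewhere in this
# reference, and the credential flags shown for the other s3 providers,
# a Tencent COS storage with a larger chunk size and more multipart
# upload concurrency might be created like this (name, credentials and
# values are placeholders):

singularity storage create s3 tencentcos \
    --name my-cos \
    --access-key-id AKIDxxxxxxxxxxxx \
    --secret-access-key xxxxxxxxxxxxxxxx \
    --upload-concurrency 8 \
    --chunk-size 16Mi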
@@ -331,12 +337,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -364,6 +387,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -398,9 +432,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -413,36 +520,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. 
(default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -457,7 +573,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/s3/wasabi.md b/docs/en/cli-reference/storage/create/s3/wasabi.md index 0d487196..090c5587 100644 --- a/docs/en/cli-reference/storage/create/s3/wasabi.md +++ b/docs/en/cli-reference/storage/create/s3/wasabi.md @@ -193,10 +193,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -213,6 +213,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -222,6 +226,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -306,13 +315,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. 
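# --- Hypothetical usage sketch -----------------------------------------
# Each option above also lists an environment variable in brackets,
# e.g. [$UPLOAD_CONCURRENCY]. Assuming those names map directly onto the
# corresponding flags, the same settings could be supplied through the
# environment instead of on the command line (values are placeholders):

export ACCESS_KEY_ID=xxxxxxxxxxxxxxxx
export SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxx
export UPLOAD_CONCURRENCY=8
singularity storage create s3 wasabi --name my-wasabi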
@@ -330,12 +336,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -363,6 +386,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -397,9 +431,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -413,36 +520,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. 
(default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -457,7 +573,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/seafile.md b/docs/en/cli-reference/storage/create/seafile.md index cdb104bd..3b1d30ee 100644 --- a/docs/en/cli-reference/storage/create/seafile.md +++ b/docs/en/cli-reference/storage/create/seafile.md @@ -45,6 +45,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --2fa Two-factor authentication ('true' if the account has 2FA enabled). (default: false) [$2FA] @@ -58,8 +61,9 @@ OPTIONS: Advanced - --create-library Should rclone create a library if it doesn't exist. (default: false) [$CREATE_LIBRARY] - --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8") [$ENCODING] + --create-library Should rclone create a library if it doesn't exist. (default: false) [$CREATE_LIBRARY] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8") [$ENCODING] Client Config @@ -74,7 +78,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/sftp.md b/docs/en/cli-reference/storage/create/sftp.md index 9e92d021..1cbec753 100644 --- a/docs/en/cli-reference/storage/create/sftp.md +++ b/docs/en/cli-reference/storage/create/sftp.md @@ -26,7 +26,15 @@ DESCRIPTION: --key-pem Raw PEM-encoded private key. - If specified, will override key_file parameter. 
+ Note that this should be on a single line with line endings replaced with '\n', eg + + key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY----- + + This will generate the single line correctly: + + awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa + + If specified, it will override the key_file parameter. --key-file Path to PEM-encoded private key file. @@ -112,6 +120,18 @@ DESCRIPTION: E.g. if home directory can be found in a shared folder called "home": rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory + + To specify only the path to the SFTP remote's root, and allow rclone to add any relative subpaths automatically (including unwrapping/decrypting remotes as necessary), add the '@' character to the beginning of the path. + + E.g. the first example above could be rewritten as: + + rclone sync /home/local/directory remote:/directory --sftp-path-override @/volume2 + + Note that when using this method with Synology "home" folders, the full "/homes/USER" path should be specified instead of "/home". + + E.g. the second example above should be rewritten as: + + rclone sync /home/local/directory remote:/homes/USER/directory --sftp-path-override @/volume1 --set-modtime Set the modified time on the remote if set. @@ -147,6 +167,15 @@ DESCRIPTION: Specifies the path or command to run a sftp server on the remote host. The subsystem option is ignored when server_command is defined. + + If adding server_command to the configuration file please note that + it should not be enclosed in quotes, since that will make rclone fail. + + A working example is: + + [remote_name] + type = sftp + server_command = sudo /usr/libexec/openssh/sftp-server --use-fstat If set use fstat instead of stat. @@ -226,6 +255,23 @@ DESCRIPTION: cost of using more memory. + --connections + Maximum number of SFTP simultaneous connections, 0 for unlimited. + + Note that setting this is very likely to cause deadlocks so it should + be used with care. + + If you are doing a sync or copy then make sure connections is one more + than the sum of `--transfers` and `--checkers`. + + If you use `--check-first` then it just needs to be one more than the + maximum of `--checkers` and `--transfers`. + + So for `connections 3` you'd use `--checkers 2 --transfers 2 + --check-first` or `--checkers 1 --transfers 1`. + + + --set-env Environment variables to pass to sftp and commands @@ -239,7 +285,7 @@ DESCRIPTION: VAR1=value VAR2=value - and pass variables with spaces in in quotes, eg + and pass variables with spaces in quotes, eg "VAR3=value with space" "VAR4=value with space" VAR5=nospacehere @@ -279,6 +325,77 @@ DESCRIPTION: umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com + --host-key-algorithms + Space separated list of host key algorithms, ordered by preference. + + At least one must match with server configuration. This can be checked for example using ssh -Q HostKeyAlgorithms. + + Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled. + + Example: + + ssh-ed25519 ssh-rsa ssh-dss + + + --ssh + Path and arguments to external ssh binary. + + Normally rclone will use its internal ssh library to connect to the + SFTP server. However it does not implement all possible ssh options so + it may be desirable to use an external ssh binary. 
+ + Rclone ignores all the internal config if you use this option and + expects you to configure the ssh binary with the user/host/port and + any other options you need. + + **Important** The ssh command must log in without asking for a + password so needs to be configured with keys or certificates. + + Rclone will run the command supplied either with the additional + arguments "-s sftp" to access the SFTP subsystem or with commands such + as "md5sum /path/to/file" appended to read checksums. + + Any arguments with spaces in should be surrounded by "double quotes". + + An example setting might be: + + ssh -o ServerAliveInterval=20 user@example.com + + Note that when using an external ssh binary rclone makes a new ssh + connection for every hash it calculates. + + + --socks-proxy + Socks 5 proxy host. + + Supports the format user:pass@host:port, user@host:port, host:port. + + Example: + + myUser:myPass@localhost:9005 + + + --copy-is-hardlink + Set to enable server side copies using hardlinks. + + The SFTP protocol does not define a copy command so normally server + side copies are not allowed with the sftp backend. + + However the SFTP protocol does support hardlinking, and if you enable + this flag then the sftp backend will support server side copies. These + will be implemented by doing a hardlink from the source to the + destination. + + Not all sftp servers support this. + + Note that hardlinking two files together will use no additional space + as the source and the destination will be the same file. + + This feature may be useful backups made with --copy-dest. + + --description + Description of the remote. + OPTIONS: --disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available. (default: false) [$DISABLE_HASHCHECK] @@ -291,6 +408,7 @@ OPTIONS: --pass value SSH password, leave blank to use ssh-agent. [$PASS] --port value SSH port number. (default: 22) [$PORT] --pubkey-file value Optional path to public key file. [$PUBKEY_FILE] + --ssh value Path and arguments to external ssh binary. [$SSH] --use-insecure-cipher Enable the use of insecure ciphers and key exchange methods. (default: false) [$USE_INSECURE_CIPHER] --user value SSH username. (default: "$USER") [$USER] @@ -300,8 +418,12 @@ OPTIONS: --chunk-size value Upload and download chunk size. (default: "32Ki") [$CHUNK_SIZE] --ciphers value Space separated list of ciphers to be used for session encryption, ordered by preference. [$CIPHERS] --concurrency value The maximum number of outstanding requests for one file (default: 64) [$CONCURRENCY] + --connections value Maximum number of SFTP simultaneous connections, 0 for unlimited. (default: 0) [$CONNECTIONS] + --copy-is-hardlink Set to enable server side copies using hardlinks. (default: false) [$COPY_IS_HARDLINK] + --description value Description of the remote. [$DESCRIPTION] --disable-concurrent-reads If set don't use concurrent reads. (default: false) [$DISABLE_CONCURRENT_READS] --disable-concurrent-writes If set don't use concurrent writes. (default: false) [$DISABLE_CONCURRENT_WRITES] + --host-key-algorithms value Space separated list of host key algorithms, ordered by preference. [$HOST_KEY_ALGORITHMS] --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] --key-exchange value Space separated list of key exchange algorithms, ordered by preference. [$KEY_EXCHANGE] --known-hosts-file value Optional path to known_hosts file. 
[$KNOWN_HOSTS_FILE] @@ -314,6 +436,7 @@ OPTIONS: --sha1sum-command value The command used to read sha1 hashes. [$SHA1SUM_COMMAND] --shell-type value The type of SSH shell on remote server, if any. [$SHELL_TYPE] --skip-links Set to skip any symlinks and any other non regular files. (default: false) [$SKIP_LINKS] + --socks-proxy value Socks 5 proxy host. [$SOCKS_PROXY] --subsystem value Specifies the SSH2 subsystem on the remote host. (default: "sftp") [$SUBSYSTEM] --use-fstat If set use fstat instead of stat. (default: false) [$USE_FSTAT] @@ -330,7 +453,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/sharefile.md b/docs/en/cli-reference/storage/create/sharefile.md index d603a446..8429f03b 100644 --- a/docs/en/cli-reference/storage/create/sharefile.md +++ b/docs/en/cli-reference/storage/create/sharefile.md @@ -9,6 +9,29 @@ USAGE: singularity storage create sharefile [command options] DESCRIPTION: + --client-id + OAuth Client Id. + + Leave blank normally. + + --client-secret + OAuth Client Secret. + + Leave blank normally. + + --token + OAuth Access Token as a JSON blob. + + --auth-url + Auth server URL. + + Leave blank to use the provider defaults. + + --token-url + Token server url. + + Leave blank to use the provider defaults. + --upload-cutoff Cutoff for switching to multipart upload. @@ -47,16 +70,25 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] --help, -h show help --root-folder-id value ID of the root folder. [$ROOT_FOLDER_ID] Advanced + --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Upload chunk size. (default: "64Mi") [$CHUNK_SIZE] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] --endpoint value Endpoint for API calls. [$ENDPOINT] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] --upload-cutoff value Cutoff for switching to multipart upload. 
(default: "128Mi") [$UPLOAD_CUTOFF] Client Config @@ -72,7 +104,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/sia.md b/docs/en/cli-reference/storage/create/sia.md index 5f8a8e16..adf5597a 100644 --- a/docs/en/cli-reference/storage/create/sia.md +++ b/docs/en/cli-reference/storage/create/sia.md @@ -30,6 +30,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --api-password value Sia Daemon API Password. [$API_PASSWORD] @@ -38,8 +41,9 @@ OPTIONS: Advanced - --encoding value The encoding for the backend. (default: "Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --user-agent value Siad User Agent (default: "Sia-Agent") [$USER_AGENT] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --user-agent value Siad User Agent (default: "Sia-Agent") [$USER_AGENT] Client Config @@ -54,7 +58,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/smb.md b/docs/en/cli-reference/storage/create/smb.md index f50cfe5f..27b20ab8 100644 --- a/docs/en/cli-reference/storage/create/smb.md +++ b/docs/en/cli-reference/storage/create/smb.md @@ -59,6 +59,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --domain value Domain name for NTLM authentication. (default: "WORKGROUP") [$DOMAIN] @@ -72,6 +75,7 @@ OPTIONS: Advanced --case-insensitive Whether the server is configured to be case-insensitive. (default: true) [$CASE_INSENSITIVE] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] --hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access. (default: true) [$HIDE_SPECIAL_SHARE] --idle-timeout value Max time before closing idle connections. 
(default: "1m0s") [$IDLE_TIMEOUT] @@ -89,7 +93,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/storj/existing.md b/docs/en/cli-reference/storage/create/storj/existing.md index 1b25d504..7498e901 100644 --- a/docs/en/cli-reference/storage/create/storj/existing.md +++ b/docs/en/cli-reference/storage/create/storj/existing.md @@ -12,11 +12,18 @@ DESCRIPTION: --access-grant Access grant. + --description + Description of the remote. + OPTIONS: --access-grant value Access grant. [$ACCESS_GRANT] --help, -h show help + Advanced + + --description value Description of the remote. [$DESCRIPTION] + Client Config --client-ca-cert value Path to CA certificate used to verify servers @@ -30,7 +37,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/storj/new.md b/docs/en/cli-reference/storage/create/storj/new.md index ace2c3b3..0325c617 100644 --- a/docs/en/cli-reference/storage/create/storj/new.md +++ b/docs/en/cli-reference/storage/create/storj/new.md @@ -27,6 +27,9 @@ DESCRIPTION: To access existing objects enter passphrase used for uploading. + --description + Description of the remote. + OPTIONS: --api-key value API key. [$API_KEY] @@ -34,6 +37,10 @@ OPTIONS: --passphrase value Encryption passphrase. [$PASSPHRASE] --satellite-address value Satellite address. (default: "us1.storj.io") [$SATELLITE_ADDRESS] + Advanced + + --description value Description of the remote. [$DESCRIPTION] + Client Config --client-ca-cert value Path to CA certificate used to verify servers @@ -47,7 +54,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/sugarsync.md b/docs/en/cli-reference/storage/create/sugarsync.md index f32221c6..24bfbb33 100644 --- a/docs/en/cli-reference/storage/create/sugarsync.md +++ b/docs/en/cli-reference/storage/create/sugarsync.md @@ -63,6 +63,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --access-key-id value Sugarsync Access Key ID. [$ACCESS_KEY_ID] @@ -76,6 +79,7 @@ OPTIONS: --authorization value Sugarsync authorization. [$AUTHORIZATION] --authorization-expiry value Sugarsync authorization expiry. 
[$AUTHORIZATION_EXPIRY] --deleted-id value Sugarsync deleted folder id. [$DELETED_ID] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Ctl,InvalidUtf8,Dot") [$ENCODING] --refresh-token value Sugarsync refresh token. [$REFRESH_TOKEN] --root-id value Sugarsync root id. [$ROOT_ID] @@ -94,7 +98,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/swift.md b/docs/en/cli-reference/storage/create/swift.md index 9dede104..212622b3 100644 --- a/docs/en/cli-reference/storage/create/swift.md +++ b/docs/en/cli-reference/storage/create/swift.md @@ -1,9 +1,9 @@ -# OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) +# OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) {% code fullWidth="true" %} ``` NAME: - singularity storage create swift - OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) + singularity storage create swift - OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) USAGE: singularity storage create swift [command options] @@ -33,6 +33,7 @@ DESCRIPTION: | https://auth.storage.memset.com/v1.0 | Memset Memstore UK | https://auth.storage.memset.com/v2.0 | Memset Memstore UK v2 | https://auth.cloud.ovh.net/v3 | OVH + | https://authenticate.ain.net | Blomp Cloud Storage --user-id User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID). @@ -96,33 +97,67 @@ DESCRIPTION: | pcs | OVH Public Cloud Storage | pca | OVH Public Cloud Archive + --fetch-until-empty-page + When paginating, always fetch unless we received an empty page. + + Consider using this option if rclone listings show fewer objects + than expected, or if repeated syncs copy unchanged objects. + + It is safe to enable this, but rclone may make more API calls than + necessary. + + This is one of a pair of workarounds to handle implementations + of the Swift API that do not implement pagination as expected. See + also "partial_page_fetch_threshold". + + --partial-page-fetch-threshold + When paginating, fetch if the current page is within this percentage of the limit. + + Consider using this option if rclone listings show fewer objects + than expected, or if repeated syncs copy unchanged objects. + + It is safe to enable this, but rclone may make more API calls than + necessary. + + This is one of a pair of workarounds to handle implementations + of the Swift API that do not implement pagination as expected. See + also "fetch_until_empty_page". + --chunk-size - Above this size files will be chunked into a _segments container. + Above this size files will be chunked. + + Above this size files will be chunked into a a `_segments` container + or a `.file-segments` directory. (See the `use_segments_container` option + for more info). Default for this is 5 GiB which is its maximum value, which + means only files above this size will be chunked. + + Rclone uploads chunked files as dynamic large objects (DLO). - Above this size files will be chunked into a _segments container. 
The - default for this is 5 GiB which is its maximum value. --no-chunk Don't chunk files during streaming upload. - When doing streaming uploads (e.g. using rcat or mount) setting this - flag will cause the swift backend to not upload chunked files. + When doing streaming uploads (e.g. using `rcat` or `mount` with + `--vfs-cache-mode off`) setting this flag will cause the swift backend + to not upload chunked files. - This will limit the maximum upload size to 5 GiB. However non chunked - files are easier to deal with and have an MD5SUM. + This will limit the maximum streamed upload size to 5 GiB. This is + useful because non chunked files are easier to deal with and have an + MD5SUM. - Rclone will still chunk files bigger than chunk_size when doing normal - copy operations. + Rclone will still chunk files bigger than `chunk_size` when doing + normal copy operations. --no-large-objects Disable support for static and dynamic large objects Swift cannot transparently store files bigger than 5 GiB. There are - two schemes for doing that, static or dynamic large objects, and the - API does not allow rclone to determine whether a file is a static or - dynamic large object without doing a HEAD on the object. Since these - need to be treated differently, this means rclone has to issue HEAD - requests for objects for example when reading checksums. + two schemes for chunking large files, static large objects (SLO) or + dynamic large objects (DLO), and the API does not allow rclone to + determine whether a file is a static or dynamic large object without + doing a HEAD on the object. Since these need to be treated + differently, this means rclone has to issue HEAD requests for objects + for example when reading checksums. When `no_large_objects` is set, rclone will assume that there are no static or dynamic large objects stored. This means it can stop doing @@ -133,16 +168,45 @@ DESCRIPTION: uploaded in chunks, so files bigger than 5 GiB will just fail on upload. - If you set this option and there *are* static or dynamic large objects, + If you set this option and there **are** static or dynamic large objects, then this will give incorrect hashes for them. Downloads will succeed, but other operations such as Remove and Copy will fail. + --use-segments-container + Choose destination for large object segments + + Swift cannot transparently store files bigger than 5 GiB and rclone + will chunk files larger than `chunk_size` (default 5 GiB) in order to + upload them. + + If this value is `true` the chunks will be stored in an additional + container named the same as the destination container but with + `_segments` appended. This means that there won't be any duplicated + data in the original container but having another container may not be + acceptable. + + If this value is `false` the chunks will be stored in a + `.file-segments` directory in the root of the container. This + directory will be omitted when listing the container. Some + providers (eg Blomp) require this mode as creating additional + containers isn't allowed. If it is desired to see the `.file-segments` + directory in the root then this flag must be set to `true`. + + If this value is `unset` (the default), then rclone will choose the value + to use. It will be `false` unless rclone detects any `auth_url`s that + it knows need it to be `true`. In this case you'll see a message in + the DEBUG log. + + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. 
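As an illustrative sketch only (not part of the generated reference): the new Swift segment and pagination options above might be combined like this when creating a storage. The `--auth` endpoint value comes from the provider table above; `--user` and `--key` are assumed standard Swift credential flags and their values are placeholders.

```
# Sketch only: a Blomp-style endpoint keeps segments in a .file-segments
# directory rather than a separate *_segments container.
singularity storage create swift \
  --auth https://authenticate.ain.net \
  --user example-user --key example-key \
  --use-segments-container false \
  --fetch-until-empty-page
```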
+ --description + Description of the remote. + OPTIONS: --application-credential-id value Application Credential ID (OS_APPLICATION_CREDENTIAL_ID). [$APPLICATION_CREDENTIAL_ID] @@ -167,11 +231,15 @@ OPTIONS: Advanced - --chunk-size value Above this size files will be chunked into a _segments container. (default: "5Gi") [$CHUNK_SIZE] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure. (default: false) [$LEAVE_PARTS_ON_ERROR] - --no-chunk Don't chunk files during streaming upload. (default: false) [$NO_CHUNK] - --no-large-objects Disable support for static and dynamic large objects (default: false) [$NO_LARGE_OBJECTS] + --chunk-size value Above this size files will be chunked. (default: "5Gi") [$CHUNK_SIZE] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8") [$ENCODING] + --fetch-until-empty-page When paginating, always fetch unless we received an empty page. (default: false) [$FETCH_UNTIL_EMPTY_PAGE] + --leave-parts-on-error If true avoid calling abort upload on a failure. (default: false) [$LEAVE_PARTS_ON_ERROR] + --no-chunk Don't chunk files during streaming upload. (default: false) [$NO_CHUNK] + --no-large-objects Disable support for static and dynamic large objects (default: false) [$NO_LARGE_OBJECTS] + --partial-page-fetch-threshold value When paginating, fetch if the current page is within this percentage of the limit. (default: 0) [$PARTIAL_PAGE_FETCH_THRESHOLD] + --use-segments-container value Choose destination for large object segments (default: "unset") [$USE_SEGMENTS_CONTAINER] Client Config @@ -186,7 +254,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/union.md b/docs/en/cli-reference/storage/create/union.md index 17ea2136..2f20fa9b 100644 --- a/docs/en/cli-reference/storage/create/union.md +++ b/docs/en/cli-reference/storage/create/union.md @@ -34,6 +34,9 @@ DESCRIPTION: If a remote has less than this much free space then it won't be considered for use in lfs or eplfs policies. + --description + Description of the remote. + OPTIONS: --action-policy value Policy to choose upstream on ACTION category. (default: "epall") [$ACTION_POLICY] @@ -45,6 +48,7 @@ OPTIONS: Advanced + --description value Description of the remote. [$DESCRIPTION] --min-free-space value Minimum viable free space for lfs/eplfs policies. 
(default: "1Gi") [$MIN_FREE_SPACE] Client Config @@ -60,7 +64,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/uptobox.md b/docs/en/cli-reference/storage/create/uptobox.md index c38657f3..6dcca5ee 100644 --- a/docs/en/cli-reference/storage/create/uptobox.md +++ b/docs/en/cli-reference/storage/create/uptobox.md @@ -14,11 +14,17 @@ DESCRIPTION: Get it from https://uptobox.com/my_account. + --private + Set to make uploaded files private + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --access-token value Your access token. [$ACCESS_TOKEN] @@ -26,7 +32,9 @@ OPTIONS: Advanced - --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot") [$ENCODING] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot") [$ENCODING] + --private Set to make uploaded files private (default: false) [$PRIVATE] Client Config @@ -41,7 +49,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/webdav.md b/docs/en/cli-reference/storage/create/webdav.md index c48e469f..61570883 100644 --- a/docs/en/cli-reference/storage/create/webdav.md +++ b/docs/en/cli-reference/storage/create/webdav.md @@ -18,10 +18,12 @@ DESCRIPTION: Name of the WebDAV site/service/software you are using. Examples: + | fastmail | Fastmail Files | nextcloud | Nextcloud | owncloud | Owncloud | sharepoint | Sharepoint Online, authenticated by Microsoft account | sharepoint-ntlm | Sharepoint with NTLM authentication, usually self-hosted or on-premises + | rclone | rclone WebDAV server to serve a remote over HTTP via the WebDAV protocol | other | Other site/service or software --user @@ -58,6 +60,30 @@ DESCRIPTION: You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'. + --pacer-min-sleep + Minimum time to sleep between API calls. + + --nextcloud-chunk-size + Nextcloud upload chunk size. + + We recommend configuring your NextCloud instance to increase the max chunk size to 1 GB for better upload performances. + See https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side + + Set to 0 to disable chunked uploading. 
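A minimal sketch of the Nextcloud chunking advice above; only `--vendor`, `--user` and `--nextcloud-chunk-size` appear in this reference, while the `--url` flag and its value are assumed placeholders.

```
# Sketch only: raise the upload chunk size to 1 GiB for a Nextcloud remote
# whose server is configured with a 1 GB max chunk size.
singularity storage create webdav \
  --vendor nextcloud \
  --url https://cloud.example.com/remote.php/dav/files/alice \
  --user alice \
  --nextcloud-chunk-size 1Gi
```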
+ + + --owncloud-exclude-shares + Exclude ownCloud shares + + --owncloud-exclude-mounts + Exclude ownCloud mounted storages + + --unix-socket + Path to a unix domain socket to dial to, instead of opening a TCP connection directly + + --description + Description of the remote. + OPTIONS: --bearer-token value Bearer token instead of user/pass (e.g. a Macaroon). [$BEARER_TOKEN] @@ -70,8 +96,14 @@ OPTIONS: Advanced --bearer-token-command value Command to run to get a bearer token. [$BEARER_TOKEN_COMMAND] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. [$ENCODING] --headers value Set HTTP headers for all transactions. [$HEADERS] + --nextcloud-chunk-size value Nextcloud upload chunk size. (default: "10Mi") [$NEXTCLOUD_CHUNK_SIZE] + --owncloud-exclude-mounts Exclude ownCloud mounted storages (default: false) [$OWNCLOUD_EXCLUDE_MOUNTS] + --owncloud-exclude-shares Exclude ownCloud shares (default: false) [$OWNCLOUD_EXCLUDE_SHARES] + --pacer-min-sleep value Minimum time to sleep between API calls. (default: "10ms") [$PACER_MIN_SLEEP] + --unix-socket value Path to a unix domain socket to dial to, instead of opening a TCP connection directly [$UNIX_SOCKET] Client Config @@ -86,7 +118,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/yandex.md b/docs/en/cli-reference/storage/create/yandex.md index 4d8e1bab..1a059e42 100644 --- a/docs/en/cli-reference/storage/create/yandex.md +++ b/docs/en/cli-reference/storage/create/yandex.md @@ -40,6 +40,12 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --spoof-ua + Set the user agent to match an official version of the yandex disk client. May help with upload performance. + + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -48,11 +54,13 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] + --spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance. (default: true) [$SPOOF_UA] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. 
[$TOKEN_URL] Client Config @@ -67,7 +75,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/create/zoho.md b/docs/en/cli-reference/storage/create/zoho.md index d8ae15c1..f4d04f9c 100644 --- a/docs/en/cli-reference/storage/create/zoho.md +++ b/docs/en/cli-reference/storage/create/zoho.md @@ -52,6 +52,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -61,10 +64,11 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --encoding value The encoding for the backend. (default: "Del,Ctl,InvalidUtf8") [$ENCODING] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Del,Ctl,InvalidUtf8") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config @@ -79,7 +83,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string (default: rclone default) General diff --git a/docs/en/cli-reference/storage/update/README.md b/docs/en/cli-reference/storage/update/README.md index 3eb7b95c..80eb6cba 100644 --- a/docs/en/cli-reference/storage/update/README.md +++ b/docs/en/cli-reference/storage/update/README.md @@ -9,7 +9,6 @@ USAGE: singularity storage update command [command options] COMMANDS: - acd Amazon Drive azureblob Microsoft Azure Blob Storage b2 Backblaze B2 box Box @@ -37,7 +36,7 @@ COMMANDS: premiumizeme premiumize.me putio Put.io qingstor QingCloud Object Storage - s3 Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi + s3 Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others seafile seafile sftp SSH/SFTP sharefile Citrix Sharefile @@ -45,7 +44,7 @@ COMMANDS: smb SMB / CIFS storj Storj Decentralized Cloud Storage sugarsync Sugarsync - swift OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) + swift OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) union Union 
merges the contents of several upstream fs uptobox Uptobox webdav WebDAV diff --git a/docs/en/cli-reference/storage/update/acd.md b/docs/en/cli-reference/storage/update/acd.md deleted file mode 100644 index 757b5c85..00000000 --- a/docs/en/cli-reference/storage/update/acd.md +++ /dev/null @@ -1,119 +0,0 @@ -# Amazon Drive - -{% code fullWidth="true" %} -``` -NAME: - singularity storage update acd - Amazon Drive - -USAGE: - singularity storage update acd [command options] - -DESCRIPTION: - --client-id - OAuth Client Id. - - Leave blank normally. - - --client-secret - OAuth Client Secret. - - Leave blank normally. - - --token - OAuth Access Token as a JSON blob. - - --auth-url - Auth server URL. - - Leave blank to use the provider defaults. - - --token-url - Token server url. - - Leave blank to use the provider defaults. - - --checkpoint - Checkpoint for internal polling (debug). - - --upload-wait-per-gb - Additional time per GiB to wait after a failed complete upload to see if it appears. - - Sometimes Amazon Drive gives an error when a file has been fully - uploaded but the file appears anyway after a little while. This - happens sometimes for files over 1 GiB in size and nearly every time for - files bigger than 10 GiB. This parameter controls the time rclone waits - for the file to appear. - - The default value for this parameter is 3 minutes per GiB, so by - default it will wait 3 minutes for every GiB uploaded to see if the - file appears. - - You can disable this feature by setting it to 0. This may cause - conflict errors as rclone retries the failed upload but the file will - most likely appear correctly eventually. - - These values were determined empirically by observing lots of uploads - of big files for a range of file sizes. - - Upload with the "-v" flag to see more info about what rclone is doing - in this situation. - - --templink-threshold - Files >= this size will be downloaded via their tempLink. - - Files this size or more will be downloaded via their "tempLink". This - is to work around a problem with Amazon Drive which blocks downloads - of files bigger than about 10 GiB. The default for this is 9 GiB which - shouldn't need to be changed. - - To download files above this threshold, rclone requests a "tempLink" - which downloads the file through a temporary URL directly from the - underlying S3 storage. - - --encoding - The encoding for the backend. - - See the [encoding section in the overview](/overview/#encoding) for more info. - - -OPTIONS: - --client-id value OAuth Client Id. [$CLIENT_ID] - --client-secret value OAuth Client Secret. [$CLIENT_SECRET] - --help, -h show help - - Advanced - - --auth-url value Auth server URL. [$AUTH_URL] - --checkpoint value Checkpoint for internal polling (debug). [$CHECKPOINT] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --templink-threshold value Files >= this size will be downloaded via their tempLink. (default: "9Gi") [$TEMPLINK_THRESHOLD] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] - --upload-wait-per-gb value Additional time per GiB to wait after a failed complete upload to see if it appears. (default: "3m0s") [$UPLOAD_WAIT_PER_GB] - - Client Config - - --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. - --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. 
- --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) - --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) - --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" - --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) - --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. - --client-no-gzip Don't set Accept-Encoding: gzip (default: false) - --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) - --client-timeout value IO idle timeout (default: 5m0s) - --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) - - Retry Strategy - - --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) - --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) - --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) - --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) - --client-retry-max value Max number of retries for IO read errors (default: 10) - --client-skip-inaccessible Skip inaccessible files when opening (default: false) - -``` -{% endcode %} diff --git a/docs/en/cli-reference/storage/update/azureblob.md b/docs/en/cli-reference/storage/update/azureblob.md index 5df06c26..126ab09a 100644 --- a/docs/en/cli-reference/storage/update/azureblob.md +++ b/docs/en/cli-reference/storage/update/azureblob.md @@ -195,10 +195,10 @@ DESCRIPTION: avoid the time out. --access-tier - Access tier of blob: hot, cool or archive. + Access tier of blob: hot, cool, cold or archive. - Archived blobs can be restored by setting access tier to hot or - cool. Leave blank if you intend to use default access tier, which is + Archived blobs can be restored by setting access tier to hot, cool or + cold. Leave blank if you intend to use default access tier, which is set at account level If there is no "access tier" specified, rclone doesn't apply any tier. @@ -206,7 +206,7 @@ DESCRIPTION: are not modified, specifying "access tier" to new one will have no effect. If blobs are in "archive tier" at remote, trying to perform data transfer operations from remote will not be allowed. User should first restore by - tiering blob to "Hot" or "Cool". + tiering blob to "Hot", "Cool" or "Cold". --archive-tier-delete Delete archive tier blobs before overwriting. @@ -233,13 +233,10 @@ DESCRIPTION: to start uploading. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --encoding The encoding for the backend. 
@@ -255,6 +252,16 @@ DESCRIPTION: | blob | Blob data within this container can be read via anonymous request. | container | Allow full public read access for container and blob data. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option + creates an empty object ending with "/", to persist the folder. + + This object also has the metadata "hdi_isfolder = true" to conform to + the Microsoft standard. + + --no-check-container If set, don't attempt to check the container exists or create it. @@ -265,6 +272,17 @@ DESCRIPTION: --no-head-object If set, do not do HEAD before GET when getting objects. + --delete-snapshots + Set to specify how to deal with snapshots on blob deletion. + + Examples: + | | By default, the delete operation fails if a blob has snapshots + | include | Specify 'include' to remove the root blob and all its snapshots + | only | Specify 'only' to remove only the snapshots but keep the root blob. + + --description + Description of the remote. + OPTIONS: --account value Azure Storage Account Name. [$ACCOUNT] @@ -280,16 +298,19 @@ OPTIONS: Advanced - --access-tier value Access tier of blob: hot, cool or archive. [$ACCESS_TIER] + --access-tier value Access tier of blob: hot, cool, cold or archive. [$ACCESS_TIER] --archive-tier-delete Delete archive tier blobs before overwriting. (default: false) [$ARCHIVE_TIER_DELETE] --chunk-size value Upload chunk size. (default: "4Mi") [$CHUNK_SIZE] --client-send-certificate-chain Send the certificate chain when using certificate auth. (default: false) [$CLIENT_SEND_CERTIFICATE_CHAIN] + --delete-snapshots value Set to specify how to deal with snapshots on blob deletion. [$DELETE_SNAPSHOTS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8") [$ENCODING] --endpoint value Endpoint for the service. [$ENDPOINT] --list-chunk value Size of blob list. (default: 5000) [$LIST_CHUNK] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] --msi-client-id value Object ID of the user-assigned MSI to use, if any. [$MSI_CLIENT_ID] --msi-mi-res-id value Azure resource ID of the user-assigned MSI to use, if any. [$MSI_MI_RES_ID] --msi-object-id value Object ID of the user-assigned MSI to use, if any. [$MSI_OBJECT_ID] @@ -317,7 +338,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/b2.md b/docs/en/cli-reference/storage/update/b2.md index 0a0700ac..b8dd1f58 100644 --- a/docs/en/cli-reference/storage/update/b2.md +++ b/docs/en/cli-reference/storage/update/b2.md @@ -31,7 +31,7 @@ DESCRIPTION: * "force_cap_exceeded" These will be set in the "X-Bz-Test-Mode" header which is documented - in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html). + in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist). --versions Include old versions in directory listings. @@ -73,6 +73,16 @@ DESCRIPTION: 5,000,000 Bytes is the minimum size. + --upload-concurrency + Concurrency for multipart uploads. + + This is the number of chunks of the same file that are uploaded + concurrently. + + Note that chunks are stored in memory and there may be up to + "--transfers" * "--b2-upload-concurrency" chunks stored at once + in memory. + --disable-checksum Disable checksums for large (> upload cutoff) files. @@ -100,24 +110,51 @@ DESCRIPTION: (No trailing "/", "file" or "bucket") --download-auth-duration - Time before the authorization token will expire in s or suffix ms|s|m|h|d. + Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. + + This is used in combination with "rclone link" for making files + accessible to the public and sets the duration before the download + authorization token will expire. - The duration before the download authorization token will expire. The minimum value is 1 second. The maximum value is one week. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) + + --lifecycle + Set the number of days deleted files should be kept when creating a bucket. + + On bucket creation, this parameter is used to create a lifecycle rule + for the entire bucket. + + If lifecycle is 0 (the default) it does not create a lifecycle rule so + the default B2 behaviour applies. This is to create versions of files + on delete and overwrite and to keep them indefinitely. + + If lifecycle is >0 then it creates a single rule setting the number of + days before a file that is deleted or overwritten is deleted + permanently. This is known as daysFromHidingToDeleting in the b2 docs. + + The minimum value for this parameter is 1 day. + + You can also enable hard_delete in the config also which will mean + deletions won't cause versions but overwrites will still cause + versions to be made. + + See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation. + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --account value Account ID or Application Key ID. [$ACCOUNT] @@ -129,14 +166,17 @@ OPTIONS: --chunk-size value Upload chunk size. 
(default: "96Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4Gi") [$COPY_CUTOFF] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Disable checksums for large (> upload cutoff) files. (default: false) [$DISABLE_CHECKSUM] - --download-auth-duration value Time before the authorization token will expire in s or suffix ms|s|m|h|d. (default: "1w") [$DOWNLOAD_AUTH_DURATION] + --download-auth-duration value Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. (default: "1w") [$DOWNLOAD_AUTH_DURATION] --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --endpoint value Endpoint for the service. [$ENDPOINT] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] + --lifecycle value Set the number of days deleted files should be kept when creating a bucket. (default: 0) [$LIFECYCLE] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] --test-mode value A flag string for X-Bz-Test-Mode header for debugging. [$TEST_MODE] + --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] --versions Include old versions in directory listings. (default: false) [$VERSIONS] @@ -154,7 +194,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/box.md b/docs/en/cli-reference/storage/update/box.md index 51f9d6cd..5fa04e82 100644 --- a/docs/en/cli-reference/storage/update/box.md +++ b/docs/en/cli-reference/storage/update/box.md @@ -66,11 +66,28 @@ DESCRIPTION: --owned-by Only show items owned by the login (email address) passed in. + --impersonate + Impersonate this user ID when using a service account. + + Setting this flag allows rclone, when using a JWT service account, to + act on behalf of another user by setting the as-user header. + + The user ID is the Box identifier for a user. User IDs can found for + any user via the GET /users endpoint, which is only available to + admins, or by calling the GET /users/me endpoint with an authenticated + user session. + + See: https://developer.box.com/guides/authentication/jwt/as-user/ + + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. 
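A sketch of the new Box `--impersonate` option described above, using a hypothetical numeric user ID as returned by the GET /users endpoint; how the existing storage to update is selected is omitted here.

```
# Sketch only: act as another Box user when authenticating with a JWT
# service account; the user ID below is a placeholder.
singularity storage update box \
  --impersonate 1234567890
```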
+ OPTIONS: --access-token value Box App Primary Access Token [$ACCESS_TOKEN] @@ -84,7 +101,9 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --commit-retries value Max number of times to try committing a multipart file. (default: 100) [$COMMIT_RETRIES] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot") [$ENCODING] + --impersonate value Impersonate this user ID when using a service account. [$IMPERSONATE] --list-chunk value Size of listing chunk 1-1000. (default: 1000) [$LIST_CHUNK] --owned-by value Only show items owned by the login (email address) passed in. [$OWNED_BY] --root-folder-id value Fill in for rclone to use a non root folder as its starting point. (default: "0") [$ROOT_FOLDER_ID] @@ -105,7 +124,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/drive.md b/docs/en/cli-reference/storage/update/drive.md index f26c00ae..d6dc1252 100644 --- a/docs/en/cli-reference/storage/update/drive.md +++ b/docs/en/cli-reference/storage/update/drive.md @@ -34,7 +34,7 @@ DESCRIPTION: Leave blank to use the provider defaults. --scope - Scope that rclone should use when requesting access from drive. + Comma separated list of scopes that rclone should use when requesting access from drive. Examples: | drive | Full access all files, excluding Application Data Folder. @@ -95,14 +95,31 @@ DESCRIPTION: If given, gdocs practically become invisible to rclone. + --show-all-gdocs + Show all Google Docs including non-exportable ones in listings. + + If you try a server side copy on a Google Form without this flag, you + will get this error: + + No export formats found for "application/vnd.google-apps.form" + + However adding this flag will allow the form to be server side copied. + + Note that rclone doesn't add extensions to the Google Docs file names + in this mode. + + Do **not** use this flag when trying to download Google Docs - rclone + will fail to download them. + + --skip-checksum-gphotos - Skip MD5 checksum on Google photos and videos only. + Skip checksums on Google photos and videos only. Use this if you get checksum errors when transferring Google photos or videos. Setting this flag will cause Google photos and videos to return a - blank MD5 checksum. + blank checksums. Google photos are identified by being in the "photos" space. @@ -233,6 +250,8 @@ DESCRIPTION: Number of API calls to allow without sleeping. --server-side-across-configs + Deprecated: use --server-side-across-configs instead. + Allow server-side operations (e.g. copy) to work across different drive configs. This can be useful if you wish to do a server-side copy between two @@ -311,21 +330,115 @@ DESCRIPTION: Note also that opening the folder once in the web interface (with the user you've authenticated rclone with) seems to be enough so that the - resource key is no needed. + resource key is not needed. + + + --fast-list-bug-fix + Work around a bug in Google Drive listing. 
+ + Normally rclone will work around a bug in Google Drive when using + --fast-list (ListR) where the search "(A in parents) or (B in + parents)" returns nothing sometimes. See #3114, #4289 and + https://issuetracker.google.com/issues/149522397 + + Rclone detects this by finding no items in more than one directory + when listing and retries them as lists of individual directories. + + This means that if you have a lot of empty directories rclone will end + up listing them all individually and this can take many more API + calls. + + This flag allows the work-around to be disabled. This is **not** + recommended in normal use - only if you have a particular case you are + having trouble with like many empty directories. + + + --metadata-owner + Control whether owner should be read or written in metadata. + + Owner is a standard part of the file metadata so is easy to read. But it + isn't always desirable to set the owner from the metadata. + + Note that you can't set the owner on Shared Drives, and that setting + ownership will generate an email to the new owner (this can't be + disabled), and you can't transfer ownership to someone outside your + organization. + + + Examples: + | off | Do not read or write the value + | read | Read the value only + | write | Write the value only + | failok | If writing fails log errors only, don't fail the transfer + | read,write | Read and Write the value. + + --metadata-permissions + Control whether permissions should be read or written in metadata. + + Reading permissions metadata from files can be done quickly, but it + isn't always desirable to set the permissions from the metadata. + + Note that rclone drops any inherited permissions on Shared Drives and + any owner permission on My Drives as these are duplicated in the owner + metadata. + Examples: + | off | Do not read or write the value + | read | Read the value only + | write | Write the value only + | failok | If writing fails log errors only, don't fail the transfer + | read,write | Read and Write the value. + + --metadata-labels + Control whether labels should be read or written in metadata. + + Reading labels metadata from files takes an extra API transaction and + will slow down listings. It isn't always desirable to set the labels + from the metadata. + + The format of labels is documented in the drive API documentation at + https://developers.google.com/drive/api/reference/rest/v3/Label - + rclone just provides a JSON dump of this format. + + When setting labels, the label and fields must already exist - rclone + will not create them. This means that if you are transferring labels + from two different accounts you will have to create the labels in + advance and use the metadata mapper to translate the IDs between the + two accounts. + + + Examples: + | off | Do not read or write the value + | read | Read the value only + | write | Write the value only + | failok | If writing fails log errors only, don't fail the transfer + | read,write | Read and Write the value. + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --env-auth + Get IAM credentials from runtime (environment variables or instance meta data if no env vars). + + Only applies if service_account_file and service_account_credentials is blank. + + Examples: + | false | Enter credentials in the next step. + | true | Get GCP IAM credentials from the environment (env vars or IAM). + + --description + Description of the remote. 
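A sketch combining the new Drive metadata controls listed above; the values are taken from the examples shown (off, read, write, failok, read,write).

```
# Sketch only: read owner and label metadata, but neither read nor write
# permission metadata.
singularity storage update drive \
  --metadata-owner read \
  --metadata-labels read \
  --metadata-permissions off
```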
+ OPTIONS: --alternate-export Deprecated: No longer needed. (default: false) [$ALTERNATE_EXPORT] --client-id value Google Application Client Id [$CLIENT_ID] --client-secret value OAuth Client Secret. [$CLIENT_SECRET] --help, -h show help - --scope value Scope that rclone should use when requesting access from drive. [$SCOPE] + --scope value Comma separated list of scopes that rclone should use when requesting access from drive. [$SCOPE] --service-account-file value Service Account Credentials JSON file path. [$SERVICE_ACCOUNT_FILE] Advanced @@ -336,23 +449,30 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Upload chunk size. (default: "8Mi") [$CHUNK_SIZE] --copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut. (default: false) [$COPY_SHORTCUT_CONTENT] + --description value Description of the remote. [$DESCRIPTION] --disable-http2 Disable drive using http2. (default: true) [$DISABLE_HTTP2] --encoding value The encoding for the backend. (default: "InvalidUtf8") [$ENCODING] + --env-auth Get IAM credentials from runtime (environment variables or instance meta data if no env vars). (default: false) [$ENV_AUTH] --export-formats value Comma separated list of preferred formats for downloading Google docs. (default: "docx,xlsx,pptx,svg") [$EXPORT_FORMATS] + --fast-list-bug-fix Work around a bug in Google Drive listing. (default: true) [$FAST_LIST_BUG_FIX] --formats value Deprecated: See export_formats. [$FORMATS] --impersonate value Impersonate this user when using a service account. [$IMPERSONATE] --import-formats value Comma separated list of preferred formats for uploading Google docs. [$IMPORT_FORMATS] --keep-revision-forever Keep new head revision of each file forever. (default: false) [$KEEP_REVISION_FOREVER] --list-chunk value Size of listing chunk 100-1000, 0 to disable. (default: 1000) [$LIST_CHUNK] + --metadata-labels value Control whether labels should be read or written in metadata. (default: "off") [$METADATA_LABELS] + --metadata-owner value Control whether owner should be read or written in metadata. (default: "read") [$METADATA_OWNER] + --metadata-permissions value Control whether permissions should be read or written in metadata. (default: "off") [$METADATA_PERMISSIONS] --pacer-burst value Number of API calls to allow without sleeping. (default: 100) [$PACER_BURST] --pacer-min-sleep value Minimum time to sleep between API calls. (default: "100ms") [$PACER_MIN_SLEEP] --resource-key value Resource key for accessing a link-shared file. [$RESOURCE_KEY] --root-folder-id value ID of the root folder. [$ROOT_FOLDER_ID] - --server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs. (default: false) [$SERVER_SIDE_ACROSS_CONFIGS] + --server-side-across-configs Deprecated: use --server-side-across-configs instead. (default: false) [$SERVER_SIDE_ACROSS_CONFIGS] --service-account-credentials value Service Account Credentials JSON blob. [$SERVICE_ACCOUNT_CREDENTIALS] --shared-with-me Only show files that are shared with me. (default: false) [$SHARED_WITH_ME] + --show-all-gdocs Show all Google Docs including non-exportable ones in listings. (default: false) [$SHOW_ALL_GDOCS] --size-as-quota Show sizes as storage quota usage, not actual size. (default: false) [$SIZE_AS_QUOTA] - --skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only. (default: false) [$SKIP_CHECKSUM_GPHOTOS] + --skip-checksum-gphotos Skip checksums on Google photos and videos only. 
(default: false) [$SKIP_CHECKSUM_GPHOTOS] --skip-dangling-shortcuts If set skip dangling shortcut files. (default: false) [$SKIP_DANGLING_SHORTCUTS] --skip-gdocs Skip google documents in all listings. (default: false) [$SKIP_GDOCS] --skip-shortcuts If set skip shortcut files. (default: false) [$SKIP_SHORTCUTS] @@ -382,7 +502,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/dropbox.md b/docs/en/cli-reference/storage/update/dropbox.md index 381df7dc..2ad349fa 100644 --- a/docs/en/cli-reference/storage/update/dropbox.md +++ b/docs/en/cli-reference/storage/update/dropbox.md @@ -80,6 +80,20 @@ DESCRIPTION: Note that we don't unmount the shared folder afterwards so the --dropbox-shared-folders can be omitted after the first use of a particular shared folder. + + See also --dropbox-root-namespace for an alternative way to work with shared + folders. + + --pacer-min-sleep + Minimum time to sleep between API calls. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --root-namespace + Specify a different Dropbox namespace ID to use as the root for all paths. --batch-mode Upload file batching sync|async|off. @@ -103,7 +117,7 @@ DESCRIPTION: This sets the batch size of files to upload. It has to be less than 1000. - By default this is 0 which means rclone which calculate the batch size + By default this is 0 which means rclone will calculate the batch size depending on the setting of batch_mode. - batch_mode: async - default batch_size is 100 @@ -127,18 +141,16 @@ DESCRIPTION: The default for this is 0 which means rclone will choose a sensible default based on the batch_mode in use. - - batch_mode: async - default batch_timeout is 500ms - - batch_mode: sync - default batch_timeout is 10s + - batch_mode: async - default batch_timeout is 10s + - batch_mode: sync - default batch_timeout is 500ms - batch_mode: off - not in use --batch-commit-timeout Max time to wait for a batch to finish committing - --encoding - The encoding for the backend. - - See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. OPTIONS: @@ -154,8 +166,11 @@ OPTIONS: --batch-size value Max number of files in upload batch. (default: 0) [$BATCH_SIZE] --batch-timeout value Max time to allow an idle upload batch before uploading. (default: "0s") [$BATCH_TIMEOUT] --chunk-size value Upload chunk size (< 150Mi). (default: "48Mi") [$CHUNK_SIZE] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot") [$ENCODING] --impersonate value Impersonate this user when using a business account. [$IMPERSONATE] + --pacer-min-sleep value Minimum time to sleep between API calls. (default: "10ms") [$PACER_MIN_SLEEP] + --root-namespace value Specify a different Dropbox namespace ID to use as the root for all paths. 
[$ROOT_NAMESPACE] --shared-files Instructs rclone to work on individual shared files. (default: false) [$SHARED_FILES] --shared-folders Instructs rclone to work on shared folders. (default: false) [$SHARED_FOLDERS] --token value OAuth Access Token as a JSON blob. [$TOKEN] @@ -174,7 +189,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/fichier.md b/docs/en/cli-reference/storage/update/fichier.md index 5cf87b49..e7204ba4 100644 --- a/docs/en/cli-reference/storage/update/fichier.md +++ b/docs/en/cli-reference/storage/update/fichier.md @@ -21,11 +21,17 @@ DESCRIPTION: --folder-password If you want to list the files in a shared folder that is password protected, add this parameter. + --cdn + Set if you wish to use CDN download links. + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --api-key value Your API Key, get it from https://1fichier.com/console/params.pl. [$API_KEY] @@ -33,6 +39,8 @@ OPTIONS: Advanced + --cdn Set if you wish to use CDN download links. (default: false) [$CDN] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot") [$ENCODING] --file-password value If you want to download a shared file that is password protected, add this parameter. [$FILE_PASSWORD] --folder-password value If you want to list the files in a shared folder that is password protected, add this parameter. [$FOLDER_PASSWORD] @@ -51,7 +59,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/filefabric.md b/docs/en/cli-reference/storage/update/filefabric.md index 7b4f6fa3..21e1071a 100644 --- a/docs/en/cli-reference/storage/update/filefabric.md +++ b/docs/en/cli-reference/storage/update/filefabric.md @@ -64,6 +64,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help @@ -73,6 +76,7 @@ OPTIONS: Advanced + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --token value Session Token. [$TOKEN] --token-expiry value Token expiry time. 
[$TOKEN_EXPIRY] @@ -91,7 +95,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/ftp.md b/docs/en/cli-reference/storage/update/ftp.md index cc371104..8a21ba7d 100644 --- a/docs/en/cli-reference/storage/update/ftp.md +++ b/docs/en/cli-reference/storage/update/ftp.md @@ -104,6 +104,16 @@ DESCRIPTION: If this is set and no password is supplied then rclone will ask for a password + --socks-proxy + Socks 5 proxy host. + + Supports the format user:pass@host:port, user@host:port, host:port. + + Example: + + myUser:myPass@localhost:9005 + + --encoding The encoding for the backend. @@ -114,6 +124,9 @@ DESCRIPTION: | BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket | PureFTPd can't handle '[]' or '*' in file names | Ctl,LeftPeriod,Slash | VsFTPd can't handle file names starting with dot + --description + Description of the remote. + OPTIONS: --explicit-tls Use Explicit FTPS (FTP over TLS). (default: false) [$EXPLICIT_TLS] @@ -129,6 +142,7 @@ OPTIONS: --ask-password Allow asking for FTP password when needed. (default: false) [$ASK_PASSWORD] --close-timeout value Maximum time to wait for a response to close. (default: "1m0s") [$CLOSE_TIMEOUT] --concurrency value Maximum number of FTP simultaneous connections, 0 for unlimited. (default: 0) [$CONCURRENCY] + --description value Description of the remote. [$DESCRIPTION] --disable-epsv Disable using EPSV even if server advertises support. (default: false) [$DISABLE_EPSV] --disable-mlsd Disable using MLSD even if server advertises support. (default: false) [$DISABLE_MLSD] --disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) (default: false) [$DISABLE_TLS13] @@ -138,6 +152,7 @@ OPTIONS: --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] --no-check-certificate Do not verify the TLS certificate of the server. (default: false) [$NO_CHECK_CERTIFICATE] --shut-timeout value Maximum time to wait for data connection closing status. (default: "1m0s") [$SHUT_TIMEOUT] + --socks-proxy value Socks 5 proxy host. [$SOCKS_PROXY] --tls-cache-size value Size of TLS session cache for all control and data connections. (default: 32) [$TLS_CACHE_SIZE] --writing-mdtm Use MDTM to set modification time (VsFtpd quirk) (default: false) [$WRITING_MDTM] @@ -154,7 +169,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/gcs.md b/docs/en/cli-reference/storage/update/gcs.md index 0195171d..311f27b3 100644 --- a/docs/en/cli-reference/storage/update/gcs.md +++ b/docs/en/cli-reference/storage/update/gcs.md @@ -37,6 +37,11 @@ DESCRIPTION: Optional - needed only for list/create/delete buckets - see your developer console. + --user-project + User project. + + Optional - needed only for requester pays. + --service-account-file Service Account Credentials JSON file path. @@ -155,6 +160,13 @@ DESCRIPTION: | ARCHIVE | Archive storage class | DURABLE_REDUCED_AVAILABILITY | Durable reduced availability storage class + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. @@ -192,6 +204,9 @@ DESCRIPTION: | false | Enter credentials in the next step. | true | Get GCP IAM credentials from the environment (env vars or IAM). + --description + Description of the remote. + OPTIONS: --anonymous Access public buckets and objects without credentials. (default: false) [$ANONYMOUS] @@ -207,16 +222,19 @@ OPTIONS: --service-account-credentials value Service Account Credentials JSON blob. [$SERVICE_ACCOUNT_CREDENTIALS] --service-account-file value Service Account Credentials JSON file path. [$SERVICE_ACCOUNT_FILE] --storage-class value The storage class to use when storing objects in Google Cloud Storage. [$STORAGE_CLASS] + --user-project value User project. [$USER_PROJECT] Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] - --endpoint value Endpoint for the service. [$ENDPOINT] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] + --endpoint value Endpoint for the service. [$ENDPOINT] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config @@ -231,7 +249,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/gphotos.md b/docs/en/cli-reference/storage/update/gphotos.md index 0f0a2e1a..5169a5d7 100644 --- a/docs/en/cli-reference/storage/update/gphotos.md +++ b/docs/en/cli-reference/storage/update/gphotos.md @@ -70,6 +70,61 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --batch-mode + Upload file batching sync|async|off. + + This sets the batch mode used by rclone. + + This has 3 possible values + + - off - no batching + - sync - batch uploads and check completion (default) + - async - batch upload and don't check completion + + Rclone will close any outstanding batches when it exits which may make + a delay on quit. + + + --batch-size + Max number of files in upload batch. + + This sets the batch size of files to upload. It has to be less than 50. + + By default this is 0 which means rclone will calculate the batch size + depending on the setting of batch_mode. + + - batch_mode: async - default batch_size is 50 + - batch_mode: sync - default batch_size is the same as --transfers + - batch_mode: off - not in use + + Rclone will close any outstanding batches when it exits which may make + a delay on quit. + + Setting this is a great idea if you are uploading lots of small files + as it will make them a lot quicker. You can use --transfers 32 to + maximise throughput. + + + --batch-timeout + Max time to allow an idle upload batch before uploading. + + If an upload batch is idle for more than this long then it will be + uploaded. + + The default for this is 0 which means rclone will choose a sensible + default based on the batch_mode in use. + + - batch_mode: async - default batch_timeout is 10s + - batch_mode: sync - default batch_timeout is 1s + - batch_mode: off - not in use + + + --batch-commit-timeout + Max time to wait for a batch to finish committing + + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -79,13 +134,18 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] - --include-archived Also view and download archived media. (default: false) [$INCLUDE_ARCHIVED] - --read-size Set to read the size of media items. (default: false) [$READ_SIZE] - --start-year value Year limits the photos to be downloaded to those which are uploaded after the given year. (default: 2000) [$START_YEAR] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --batch-commit-timeout value Max time to wait for a batch to finish committing (default: "10m0s") [$BATCH_COMMIT_TIMEOUT] + --batch-mode value Upload file batching sync|async|off. (default: "sync") [$BATCH_MODE] + --batch-size value Max number of files in upload batch. (default: 0) [$BATCH_SIZE] + --batch-timeout value Max time to allow an idle upload batch before uploading. (default: "0s") [$BATCH_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,CrLf,InvalidUtf8,Dot") [$ENCODING] + --include-archived Also view and download archived media. (default: false) [$INCLUDE_ARCHIVED] + --read-size Set to read the size of media items. 
(default: false) [$READ_SIZE] + --start-year value Year limits the photos to be downloaded to those which are uploaded after the given year. (default: 2000) [$START_YEAR] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config @@ -100,7 +160,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/hdfs.md b/docs/en/cli-reference/storage/update/hdfs.md index 613e30a9..23abf888 100644 --- a/docs/en/cli-reference/storage/update/hdfs.md +++ b/docs/en/cli-reference/storage/update/hdfs.md @@ -10,9 +10,9 @@ USAGE: DESCRIPTION: --namenode - Hadoop name node and port. + Hadoop name nodes and ports. - E.g. "namenode:8020" to connect to host namenode at port 8020. + E.g. "namenode-1:8020,namenode-2:8020,..." to connect to host namenodes at port 8020. --username Hadoop user name. @@ -31,9 +31,9 @@ DESCRIPTION: Kerberos data transfer protection: authentication|integrity|privacy. Specifies whether or not authentication, data signature integrity - checks, and wire encryption is required when communicating the the - datanodes. Possible values are 'authentication', 'integrity' and - 'privacy'. Used only with KERBEROS enabled. + checks, and wire encryption are required when communicating with + the datanodes. Possible values are 'authentication', 'integrity' + and 'privacy'. Used only with KERBEROS enabled. Examples: | privacy | Ensure authentication, integrity and encryption enabled. @@ -43,15 +43,19 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help - --namenode value Hadoop name node and port. [$NAMENODE] + --namenode value Hadoop name nodes and ports. [$NAMENODE] --username value Hadoop user name. [$USERNAME] Advanced --data-transfer-protection value Kerberos data transfer protection: authentication|integrity|privacy. [$DATA_TRANSFER_PROTECTION] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Colon,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --service-principal-name value Kerberos service principal name for the namenode. [$SERVICE_PRINCIPAL_NAME] @@ -68,7 +72,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/hidrive.md b/docs/en/cli-reference/storage/update/hidrive.md index 95f8d75e..31080566 100644 --- a/docs/en/cli-reference/storage/update/hidrive.md +++ b/docs/en/cli-reference/storage/update/hidrive.md @@ -107,6 +107,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -118,6 +121,7 @@ OPTIONS: --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Chunksize for chunked uploads. (default: "48Mi") [$CHUNK_SIZE] + --description value Description of the remote. [$DESCRIPTION] --disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary. (default: false) [$DISABLE_FETCHING_MEMBER_COUNT] --encoding value The encoding for the backend. (default: "Slash,Dot") [$ENCODING] --endpoint value Endpoint for the service. (default: "https://api.hidrive.strato.com/2.1") [$ENDPOINT] @@ -141,7 +145,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/http.md b/docs/en/cli-reference/storage/update/http.md index 9cd45402..0c82ffb1 100644 --- a/docs/en/cli-reference/storage/update/http.md +++ b/docs/en/cli-reference/storage/update/http.md @@ -56,16 +56,24 @@ DESCRIPTION: that directory listings are much quicker, but rclone won't have the times or sizes of any files, and some files that don't exist may be in the listing. + --no-escape + Do not escape URL metacharacters in path names. + + --description + Description of the remote. + OPTIONS: --help, -h show help + --no-escape Do not escape URL metacharacters in path names. (default: false) [$NO_ESCAPE] --url value URL of HTTP host to connect to. [$URL] Advanced - --headers value Set HTTP headers for all transactions. [$HEADERS] - --no-head Don't use HEAD requests. (default: false) [$NO_HEAD] - --no-slash Set this if the site doesn't end directories with /. (default: false) [$NO_SLASH] + --description value Description of the remote. [$DESCRIPTION] + --headers value Set HTTP headers for all transactions. [$HEADERS] + --no-head Don't use HEAD requests. (default: false) [$NO_HEAD] + --no-slash Set this if the site doesn't end directories with /. (default: false) [$NO_SLASH] Client Config @@ -80,7 +88,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/internetarchive.md b/docs/en/cli-reference/storage/update/internetarchive.md index 2b43f99b..88199e1f 100644 --- a/docs/en/cli-reference/storage/update/internetarchive.md +++ b/docs/en/cli-reference/storage/update/internetarchive.md @@ -47,6 +47,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --access-key-id value IAS3 Access Key. [$ACCESS_KEY_ID] @@ -55,6 +58,7 @@ OPTIONS: Advanced + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone. (default: true) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --endpoint value IAS3 Endpoint. (default: "https://s3.us.archive.org") [$ENDPOINT] @@ -74,7 +78,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/jottacloud.md b/docs/en/cli-reference/storage/update/jottacloud.md index e1fc3a71..7c281450 100644 --- a/docs/en/cli-reference/storage/update/jottacloud.md +++ b/docs/en/cli-reference/storage/update/jottacloud.md @@ -9,6 +9,29 @@ USAGE: singularity storage update jottacloud [command options] DESCRIPTION: + --client-id + OAuth Client Id. + + Leave blank normally. + + --client-secret + OAuth Client Secret. + + Leave blank normally. + + --token + OAuth Access Token as a JSON blob. + + --auth-url + Auth server URL. + + Leave blank to use the provider defaults. + + --token-url + Token server url. + + Leave blank to use the provider defaults. + --md5-memory-limit Files bigger than this will be cached on disk to calculate the MD5 if required. @@ -31,16 +54,25 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: - --help, -h show help + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] + --help, -h show help Advanced + --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] --md5-memory-limit value Files bigger than this will be cached on disk to calculate the MD5 if required. (default: "10Mi") [$MD5_MEMORY_LIMIT] --no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them. (default: false) [$NO_VERSIONS] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] --trashed-only Only show files that are in the trash. 
(default: false) [$TRASHED_ONLY] --upload-resume-limit value Files bigger than this can be resumed if the upload fail's. (default: "10Mi") [$UPLOAD_RESUME_LIMIT] @@ -57,7 +89,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/koofr/digistorage.md b/docs/en/cli-reference/storage/update/koofr/digistorage.md index 46379074..ffd0f553 100644 --- a/docs/en/cli-reference/storage/update/koofr/digistorage.md +++ b/docs/en/cli-reference/storage/update/koofr/digistorage.md @@ -23,24 +23,28 @@ DESCRIPTION: Your user name. --password - Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password). + Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password. --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help - --password value Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password). [$PASSWORD] + --password value Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password. [$PASSWORD] --user value Your user name. [$USER] Advanced - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --mountid value Mount ID of the mount to use. [$MOUNTID] - --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --mountid value Mount ID of the mount to use. [$MOUNTID] + --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] Client Config @@ -55,7 +59,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/koofr/koofr.md b/docs/en/cli-reference/storage/update/koofr/koofr.md index 3dbababf..f697cbc3 100644 --- a/docs/en/cli-reference/storage/update/koofr/koofr.md +++ b/docs/en/cli-reference/storage/update/koofr/koofr.md @@ -23,24 +23,28 @@ DESCRIPTION: Your user name. --password - Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password). + Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password. --encoding The encoding for the backend. 
See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help - --password value Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password). [$PASSWORD] + --password value Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password. [$PASSWORD] --user value Your user name. [$USER] Advanced - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --mountid value Mount ID of the mount to use. [$MOUNTID] - --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --mountid value Mount ID of the mount to use. [$MOUNTID] + --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] Client Config @@ -55,7 +59,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/koofr/other.md b/docs/en/cli-reference/storage/update/koofr/other.md index 1384af70..2da2abab 100644 --- a/docs/en/cli-reference/storage/update/koofr/other.md +++ b/docs/en/cli-reference/storage/update/koofr/other.md @@ -33,6 +33,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --endpoint value The Koofr API endpoint to use. [$ENDPOINT] @@ -42,9 +45,10 @@ OPTIONS: Advanced - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --mountid value Mount ID of the mount to use. [$MOUNTID] - --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --mountid value Mount ID of the mount to use. [$MOUNTID] + --setmtime Does the backend support setting modification time. (default: true) [$SETMTIME] Client Config @@ -59,7 +63,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/local.md b/docs/en/cli-reference/storage/update/local.md index 907ca56b..7ea63a2c 100644 --- a/docs/en/cli-reference/storage/update/local.md +++ b/docs/en/cli-reference/storage/update/local.md @@ -81,6 +81,11 @@ DESCRIPTION: - Only checksum the size that stat gave - Don't update the stat info for the file + **NB** do not use this flag on a Windows Volume Shadow (VSS). For some + unknown reason, files in a VSS sometimes show different sizes from the + directory listing (where the initial stat value comes from on Windows) + and when stat is called on them directly. Other copy tools always use + the direct stat value and setting this flag will disable that. --one-file-system @@ -100,6 +105,24 @@ DESCRIPTION: Windows/macOS and case sensitive for everything else. Use this flag to override the default choice. + --no-clone + Disable reflink cloning for server-side copies. + + Normally, for local-to-local transfers, rclone will "clone" the file when + possible, and fall back to "copying" only when cloning is not supported. + + Cloning creates a shallow copy (or "reflink") which initially shares blocks with + the original file. Unlike a "hardlink", the two files are independent and + neither will affect the other if subsequently modified. + + Cloning is usually preferable to copying, as it is much faster and is + deduplicated by default (i.e. having two identical files does not consume more + storage than having just one.) However, for use cases where data redundancy is + preferable, --local-no-clone can be used to disable cloning and force "deep" copies. + + Currently, cloning is only supported when using APFS on macOS (support for other + platforms may be added in the future.) + --no-preallocate Disable preallocation of disk space for transferred files. @@ -126,11 +149,41 @@ DESCRIPTION: when copying to a CIFS mount owned by another user. If this option is enabled, rclone will no longer update the modtime after copying a file. + --time-type + Set what kind of time is returned. + + Normally rclone does all operations on the mtime or Modification time. + + If you set this flag then rclone will return the Modified time as whatever + you set here. So if you use "rclone lsl --local-time-type ctime" then + you will see ctimes in the listing. + + If the OS doesn't support returning the time_type specified then rclone + will silently replace it with the modification time which all OSes support. + + - mtime is supported by all OSes + - atime is supported on all OSes except: plan9, js + - btime is only supported on: Windows, macOS, freebsd, netbsd + - ctime is supported on all Oses except: Windows, plan9, js + + Note that setting the time will still set the modified time so this is + only useful for reading. + + + Examples: + | mtime | The last modification time. + | atime | The last access time. + | btime | The creation time. + | ctime | The last status change time. + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help @@ -140,15 +193,18 @@ OPTIONS: --case-insensitive Force the filesystem to report itself as case insensitive. (default: false) [$CASE_INSENSITIVE] --case-sensitive Force the filesystem to report itself as case sensitive. (default: false) [$CASE_SENSITIVE] --copy-links, -L Follow symlinks and copy the pointed to item. 
(default: false) [$COPY_LINKS] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Dot") [$ENCODING] --links, -l Translate symlinks to/from regular files with a '.rclonelink' extension. (default: false) [$LINKS] --no-check-updated Don't check to see if the files change during upload. (default: false) [$NO_CHECK_UPDATED] + --no-clone Disable reflink cloning for server-side copies. (default: false) [$NO_CLONE] --no-preallocate Disable preallocation of disk space for transferred files. (default: false) [$NO_PREALLOCATE] --no-set-modtime Disable setting modtime. (default: false) [$NO_SET_MODTIME] --no-sparse Disable sparse files for multi-thread downloads. (default: false) [$NO_SPARSE] --nounc Disable UNC (long path names) conversion on Windows. (default: false) [$NOUNC] --one-file-system, -x Don't cross filesystem boundaries (unix/macOS only). (default: false) [$ONE_FILE_SYSTEM] --skip-links Don't warn about skipped symlinks. (default: false) [$SKIP_LINKS] + --time-type value Set what kind of time is returned. (default: "mtime") [$TIME_TYPE] --unicode-normalization Apply unicode NFC normalization to paths and filenames. (default: false) [$UNICODE_NORMALIZATION] --zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated). (default: false) [$ZERO_SIZE_LINKS] diff --git a/docs/en/cli-reference/storage/update/mailru.md b/docs/en/cli-reference/storage/update/mailru.md index 278e9b6c..dfc0c53d 100644 --- a/docs/en/cli-reference/storage/update/mailru.md +++ b/docs/en/cli-reference/storage/update/mailru.md @@ -9,6 +9,29 @@ USAGE: singularity storage update mailru [command options] DESCRIPTION: + --client-id + OAuth Client Id. + + Leave blank normally. + + --client-secret + OAuth Client Secret. + + Leave blank normally. + + --token + OAuth Access Token as a JSON blob. + + --auth-url + Auth server URL. + + Leave blank to use the provider defaults. + + --token-url + Token server url. + + Leave blank to use the provider defaults. + --user User name (usually email). @@ -91,21 +114,30 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: - --help, -h show help - --pass value Password. [$PASS] - --speedup-enable Skip full upload if there is another file with same data hash. (default: true) [$SPEEDUP_ENABLE] - --user value User name (usually email). [$USER] + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] + --help, -h show help + --pass value Password. [$PASS] + --speedup-enable Skip full upload if there is another file with same data hash. (default: true) [$SPEEDUP_ENABLE] + --user value User name (usually email). [$USER] Advanced + --auth-url value Auth server URL. [$AUTH_URL] --check-hash What should copy do if file checksum is mismatched or invalid. (default: true) [$CHECK_HASH] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --quirks value Comma separated list of internal maintenance flags. [$QUIRKS] --speedup-file-patterns value Comma separated list of file name patterns eligible for speedup (put by hash). 
(default: "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf") [$SPEEDUP_FILE_PATTERNS] --speedup-max-disk value This option allows you to disable speedup (put by hash) for large files. (default: "3Gi") [$SPEEDUP_MAX_DISK] --speedup-max-memory value Files larger than the size given below will always be hashed on disk. (default: "32Mi") [$SPEEDUP_MAX_MEMORY] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] --user-agent value HTTP user agent used internally by client. [$USER_AGENT] Client Config @@ -121,7 +153,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/mega.md b/docs/en/cli-reference/storage/update/mega.md index 89481453..8867c98c 100644 --- a/docs/en/cli-reference/storage/update/mega.md +++ b/docs/en/cli-reference/storage/update/mega.md @@ -34,7 +34,7 @@ DESCRIPTION: MEGA uses plain text HTTP connections by default. Some ISPs throttle HTTP connections, this causes transfers to become very slow. Enabling this will force MEGA to use HTTPS for all transfers. - HTTPS is normally not necesary since all data is already encrypted anyway. + HTTPS is normally not necessary since all data is already encrypted anyway. Enabling it will increase CPU usage and add network overhead. --encoding @@ -42,6 +42,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --help, -h show help @@ -50,10 +53,11 @@ OPTIONS: Advanced - --debug Output more debug from Mega. (default: false) [$DEBUG] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] - --use-https Use HTTPS for transfers. (default: false) [$USE_HTTPS] + --debug Output more debug from Mega. (default: false) [$DEBUG] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] + --use-https Use HTTPS for transfers. (default: false) [$USE_HTTPS] Client Config @@ -68,7 +72,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/netstorage.md b/docs/en/cli-reference/storage/update/netstorage.md index 75006586..eb63cb2d 100644 --- a/docs/en/cli-reference/storage/update/netstorage.md +++ b/docs/en/cli-reference/storage/update/netstorage.md @@ -32,6 +32,9 @@ DESCRIPTION: Please choose the 'y' option to set your own password then enter your secret. + --description + Description of the remote. + OPTIONS: --account value Set the NetStorage account name [$ACCOUNT] @@ -41,7 +44,8 @@ OPTIONS: Advanced - --protocol value Select between HTTP or HTTPS protocol. (default: "https") [$PROTOCOL] + --description value Description of the remote. [$DESCRIPTION] + --protocol value Select between HTTP or HTTPS protocol. (default: "https") [$PROTOCOL] Client Config @@ -56,7 +60,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/onedrive.md b/docs/en/cli-reference/storage/update/onedrive.md index 15992799..180bd138 100644 --- a/docs/en/cli-reference/storage/update/onedrive.md +++ b/docs/en/cli-reference/storage/update/onedrive.md @@ -93,11 +93,15 @@ DESCRIPTION: listing, set this option. --server-side-across-configs + Deprecated: use --server-side-across-configs instead. + Allow server-side operations (e.g. copy) to work across different onedrive configs. - This will only work if you are copying between two OneDrive *Personal* drives AND - the files to copy are already shared between them. In other cases, rclone will - fall back to normal copy (which will be slightly slower). + This will work if you are copying between two OneDrive *Personal* drives AND the files to + copy are already shared between them. Additionally, it should also function for a user who + has access permissions both between Onedrive for *business* and *SharePoint* under the *same + tenant*, and between *SharePoint* and another *SharePoint* under the *same tenant*. In other + cases, rclone will fall back to normal copy (which will be slightly slower). --list-chunk Size of listing chunk. @@ -117,6 +121,16 @@ DESCRIPTION: this flag there. + --hard-delete + Permanently delete files on removal. + + Normally files will get sent to the recycle bin on deletion. Setting + this flag causes them to be permanently deleted. Use with care. + + OneDrive personal accounts do not support the permanentDelete API, + it only applies to OneDrive for Business and SharePoint document libraries. + + --link-scope Set the scope of the links created by the link command. @@ -145,7 +159,7 @@ DESCRIPTION: Specify the hash in use for the backend. This specifies the hash type in use. If set to "auto" it will use the - default hash which is is QuickXorHash. + default hash which is QuickXorHash. Before rclone 1.62 an SHA1 hash was used by default for Onedrive Personal. For 1.62 and later the default is to use a QuickXorHash for @@ -153,7 +167,7 @@ DESCRIPTION: accordingly. From July 2023 QuickXorHash will be the only available hash for - both OneDrive for Business and OneDriver Personal. 
+ both OneDrive for Business and OneDrive Personal. This can be set to "none" to not use any hashes. @@ -170,11 +184,73 @@ DESCRIPTION: | crc32 | CRC32 | none | None - don't use any hashes + --av-override + Allows download of files the server thinks have a virus. + + The onedrive/sharepoint server may check files uploaded with an Anti + Virus checker. If it detects any potential viruses or malware it will + block download of the file. + + In this case you will see a message like this + + server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden: + + If you are 100% sure you want to download this file anyway then use + the --onedrive-av-override flag, or av_override = true in the config + file. + + + --delta + If set rclone will use delta listing to implement recursive listings. + + If this flag is set the onedrive backend will advertise `ListR` + support for recursive listings. + + Setting this flag speeds up these things greatly: + + rclone lsf -R onedrive: + rclone size onedrive: + rclone rc vfs/refresh recursive=true + + **However** the delta listing API **only** works at the root of the + drive. If you use it not at the root then it recurses from the root + and discards all the data that is not under the directory you asked + for. So it will be correct but may not be very efficient. + + This is why this flag is not set as the default. + + As a rule of thumb if nearly all of your data is under rclone's root + directory (the `root/directory` in `onedrive:root/directory`) then + using this flag will be a big performance win. If your data is + mostly not under the root then using this flag will be a big + performance loss. + + It is recommended if you are mounting your onedrive at the root + (or near the root when using crypt) and using rclone `rc vfs/refresh`. + + + --metadata-permissions + Control whether permissions should be read or written in metadata. + + Reading permissions metadata from files can be done quickly, but it + isn't always desirable to set the permissions from the metadata. + + + Examples: + | off | Do not read or write the value + | read | Read the value only + | write | Write the value only + | read,write | Read and Write the value. + | failok | If writing fails log errors only, don't fail the transfer + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -186,20 +262,25 @@ OPTIONS: --access-scopes value Set scopes to be requested by rclone. (default: "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access") [$ACCESS_SCOPES] --auth-url value Auth server URL. [$AUTH_URL] + --av-override Allows download of files the server thinks have a virus. (default: false) [$AV_OVERRIDE] --chunk-size value Chunk size to upload files with - must be multiple of 320k (327,680 bytes). (default: "10Mi") [$CHUNK_SIZE] + --delta If set rclone will use delta listing to implement recursive listings. (default: false) [$DELTA] + --description value Description of the remote. [$DESCRIPTION] --disable-site-permission Disable the request for Sites.Read.All permission. (default: false) [$DISABLE_SITE_PERMISSION] --drive-id value The ID of the drive to use. [$DRIVE_ID] --drive-type value The type of the drive (personal | business | documentLibrary). [$DRIVE_TYPE] --encoding value The encoding for the backend.
(default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] --expose-onenote-files Set to make OneNote files show up in directory listings. (default: false) [$EXPOSE_ONENOTE_FILES] + --hard-delete Permanently delete files on removal. (default: false) [$HARD_DELETE] --hash-type value Specify the hash in use for the backend. (default: "auto") [$HASH_TYPE] --link-password value Set the password for links created by the link command. [$LINK_PASSWORD] --link-scope value Set the scope of the links created by the link command. (default: "anonymous") [$LINK_SCOPE] --link-type value Set the type of the links created by the link command. (default: "view") [$LINK_TYPE] --list-chunk value Size of listing chunk. (default: 1000) [$LIST_CHUNK] + --metadata-permissions value Control whether permissions should be read or written in metadata. (default: "off") [$METADATA_PERMISSIONS] --no-versions Remove all versions on modifying operations. (default: false) [$NO_VERSIONS] --root-folder-id value ID of the root folder. [$ROOT_FOLDER_ID] - --server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs. (default: false) [$SERVER_SIDE_ACROSS_CONFIGS] + --server-side-across-configs Deprecated: use --server-side-across-configs instead. (default: false) [$SERVER_SIDE_ACROSS_CONFIGS] --token value OAuth Access Token as a JSON blob. [$TOKEN] --token-url value Token server url. [$TOKEN_URL] @@ -216,7 +297,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/oos/README.md b/docs/en/cli-reference/storage/update/oos/README.md index 34c8877e..3110e215 100644 --- a/docs/en/cli-reference/storage/update/oos/README.md +++ b/docs/en/cli-reference/storage/update/oos/README.md @@ -18,6 +18,8 @@ COMMANDS: user_principal_auth use an OCI user and an API key for authentication. you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key. https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm + workload_identity_auth use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM). + https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm help, h Shows a list of commands or help for one command OPTIONS: diff --git a/docs/en/cli-reference/storage/update/oos/env_auth.md b/docs/en/cli-reference/storage/update/oos/env_auth.md index bf1e7c3e..5f08c9ea 100644 --- a/docs/en/cli-reference/storage/update/oos/env_auth.md +++ b/docs/en/cli-reference/storage/update/oos/env_auth.md @@ -41,9 +41,8 @@ DESCRIPTION: Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown - size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google - photos or google docs) they will be uploaded as multipart uploads - using this chunk size. 
+ size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. @@ -64,6 +63,18 @@ DESCRIPTION: statistics displayed with "-P" flag. + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + --upload-concurrency Concurrency for multipart uploads. @@ -102,7 +113,7 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --leave-parts-on-error - If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. It should be set to true for resuming uploads across different sessions. @@ -110,6 +121,16 @@ DESCRIPTION: additional costs if not cleaned up. + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. + This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. @@ -145,7 +166,7 @@ DESCRIPTION: | | None --sse-kms-key-id - if using using your own master key in vault, this header specifies the + if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. @@ -162,6 +183,9 @@ DESCRIPTION: | | None | AES256 | AES256 + --description + Description of the remote. + OPTIONS: --compartment value Object storage compartment OCID [$COMPARTMENT] @@ -172,18 +196,21 @@ OPTIONS: Advanced + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. 
(default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] - --sse-kms-key-id value if using using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] --storage-tier value The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] @@ -201,7 +228,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/oos/instance_principal_auth.md b/docs/en/cli-reference/storage/update/oos/instance_principal_auth.md index 8244a64f..94f1d25d 100644 --- a/docs/en/cli-reference/storage/update/oos/instance_principal_auth.md +++ b/docs/en/cli-reference/storage/update/oos/instance_principal_auth.md @@ -45,9 +45,8 @@ DESCRIPTION: Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown - size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google - photos or google docs) they will be uploaded as multipart uploads - using this chunk size. + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. @@ -68,6 +67,18 @@ DESCRIPTION: statistics displayed with "-P" flag. + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + --upload-concurrency Concurrency for multipart uploads. @@ -106,7 +117,7 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. 
--leave-parts-on-error - If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. It should be set to true for resuming uploads across different sessions. @@ -114,6 +125,16 @@ DESCRIPTION: additional costs if not cleaned up. + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. + This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. @@ -149,7 +170,7 @@ DESCRIPTION: | | None --sse-kms-key-id - if using using your own master key in vault, this header specifies the + if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. @@ -166,6 +187,9 @@ DESCRIPTION: | | None | AES256 | AES256 + --description + Description of the remote. + OPTIONS: --compartment value Object storage compartment OCID [$COMPARTMENT] @@ -176,18 +200,21 @@ OPTIONS: Advanced + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. 
[$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] - --sse-kms-key-id value if using using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] --storage-tier value The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] @@ -205,7 +232,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/oos/no_auth.md b/docs/en/cli-reference/storage/update/oos/no_auth.md index 7c71a8c2..64d3463d 100644 --- a/docs/en/cli-reference/storage/update/oos/no_auth.md +++ b/docs/en/cli-reference/storage/update/oos/no_auth.md @@ -38,9 +38,8 @@ DESCRIPTION: Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown - size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google - photos or google docs) they will be uploaded as multipart uploads - using this chunk size. + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. @@ -61,6 +60,18 @@ DESCRIPTION: statistics displayed with "-P" flag. + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + --upload-concurrency Concurrency for multipart uploads. @@ -99,7 +110,7 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --leave-parts-on-error - If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. It should be set to true for resuming uploads across different sessions. @@ -107,6 +118,16 @@ DESCRIPTION: additional costs if not cleaned up. + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. 
+ This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. @@ -142,7 +163,7 @@ DESCRIPTION: | | None --sse-kms-key-id - if using using your own master key in vault, this header specifies the + if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. @@ -159,6 +180,9 @@ DESCRIPTION: | | None | AES256 | AES256 + --description + Description of the remote. + OPTIONS: --endpoint value Endpoint for Object storage API. [$ENDPOINT] @@ -168,18 +192,21 @@ OPTIONS: Advanced + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] - --sse-kms-key-id value if using using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] --storage-tier value The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] @@ -197,7 +224,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/oos/resource_principal_auth.md b/docs/en/cli-reference/storage/update/oos/resource_principal_auth.md index 5f681d52..25c9f4ec 100644 --- a/docs/en/cli-reference/storage/update/oos/resource_principal_auth.md +++ b/docs/en/cli-reference/storage/update/oos/resource_principal_auth.md @@ -41,9 +41,8 @@ DESCRIPTION: Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown - size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google - photos or google docs) they will be uploaded as multipart uploads - using this chunk size. + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. @@ -64,6 +63,18 @@ DESCRIPTION: statistics displayed with "-P" flag. + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + --upload-concurrency Concurrency for multipart uploads. @@ -102,7 +113,7 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --leave-parts-on-error - If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. It should be set to true for resuming uploads across different sessions. @@ -110,6 +121,16 @@ DESCRIPTION: additional costs if not cleaned up. + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. + This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. 
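# Editorial sketch (not part of the generated reference): the OOS options documented above
# (--chunk-size, --max-upload-parts, --leave-parts-on-error, --attempt-resume-upload) work
# together for resumable multipart uploads. Assuming a storage connection named "my-oos"
# already exists (hypothetical name; target selection follows the USAGE line shown above),
# enabling resume might look roughly like:
#
#   singularity storage update oos resource_principal_auth \
#     --leave-parts-on-error \
#     --attempt-resume-upload \
#     --chunk-size 64Mi \
#     my-oos
#
# Per the help text, --leave-parts-on-error must be true for --attempt-resume-upload to skip
# parts already uploaded in a previous session; raising --chunk-size above the 5Mi default
# also raises the maximum size of streamed uploads given the 10,000-part limit.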
@@ -145,7 +166,7 @@ DESCRIPTION: | | None --sse-kms-key-id - if using using your own master key in vault, this header specifies the + if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. @@ -162,6 +183,9 @@ DESCRIPTION: | | None | AES256 | AES256 + --description + Description of the remote. + OPTIONS: --compartment value Object storage compartment OCID [$COMPARTMENT] @@ -172,18 +196,21 @@ OPTIONS: Advanced + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] - --sse-kms-key-id value if using using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] --storage-tier value The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] @@ -201,7 +228,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/oos/user_principal_auth.md b/docs/en/cli-reference/storage/update/oos/user_principal_auth.md index 58cf9cee..7718a195 100644 --- a/docs/en/cli-reference/storage/update/oos/user_principal_auth.md +++ b/docs/en/cli-reference/storage/update/oos/user_principal_auth.md @@ -57,9 +57,8 @@ DESCRIPTION: Chunk size to use for uploading. When uploading files larger than upload_cutoff or files with unknown - size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google - photos or google docs) they will be uploaded as multipart uploads - using this chunk size. + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. Note that "upload_concurrency" chunks of this size are buffered in memory per transfer. @@ -80,6 +79,18 @@ DESCRIPTION: statistics displayed with "-P" flag. + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + --upload-concurrency Concurrency for multipart uploads. @@ -118,7 +129,7 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --leave-parts-on-error - If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. It should be set to true for resuming uploads across different sessions. @@ -126,6 +137,16 @@ DESCRIPTION: additional costs if not cleaned up. + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. + This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + --no-check-bucket If set, don't attempt to check the bucket exists or create it. @@ -161,7 +182,7 @@ DESCRIPTION: | | None --sse-kms-key-id - if using using your own master key in vault, this header specifies the + if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. 
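# Editorial sketch (not part of the generated reference): the SSE options above are mutually
# exclusive -- only one of sse_customer_key_file | sse_customer_key | sse_kms_key_id is needed.
# Assuming a hypothetical Vault master key OCID and an existing storage named "my-oos", a
# KMS-backed setup might look like:
#
#   singularity storage update oos user_principal_auth \
#     --sse-kms-key-id ocid1.key.oc1..exampleuniqueID \
#     my-oos
#
# Both the OCID value and the "my-oos" name are placeholders; substitute your own values.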
@@ -178,6 +199,9 @@ DESCRIPTION: | | None | AES256 | AES256 + --description + Description of the remote. + OPTIONS: --compartment value Object storage compartment OCID [$COMPARTMENT] @@ -190,18 +214,21 @@ OPTIONS: Advanced + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. [$SSE_CUSTOMER_ALGORITHM] --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] - --sse-kms-key-id value if using using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] --storage-tier value The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] @@ -219,7 +246,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/oos/workload_identity_auth.md b/docs/en/cli-reference/storage/update/oos/workload_identity_auth.md new file mode 100644 index 00000000..ce702dad --- /dev/null +++ b/docs/en/cli-reference/storage/update/oos/workload_identity_auth.md @@ -0,0 +1,245 @@ +# use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM). +https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update oos workload_identity_auth - use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM). + https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm + +USAGE: + singularity storage update oos workload_identity_auth [command options] + +DESCRIPTION: + --namespace + Object storage namespace + + --compartment + Object storage compartment OCID + + --region + Object storage Region + + --endpoint + Endpoint for Object storage API. + + Leave blank to use the default endpoint for the region. + + --storage-tier + The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm + + Examples: + | Standard | Standard storage tier, this is the default tier + | InfrequentAccess | InfrequentAccess storage tier + | Archive | Archive storage tier + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded + as multipart uploads using this chunk size. + + Note that "upload_concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + OCI has max parts limit of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --upload-concurrency + Concurrency for multipart uploads. + + This is the number of chunks of the same file that are uploaded + concurrently. 
+ + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --copy-timeout + Timeout for copy. + + Copy is an asynchronous operation, specify timeout to wait for copy to succeed + + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --leave-parts-on-error + If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. + + It should be set to true for resuming uploads across different sessions. + + WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add + additional costs if not cleaned up. + + + --attempt-resume-upload + If true attempt to resume previously started multipart upload for the object. + This will be helpful to speed up multipart transfers by resuming uploads from past session. + + WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is + aborted and a new multipart upload is started with the new chunk size. + + The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. + + + --sse-customer-key-file + To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated + with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.' + + Examples: + | | None + + --sse-customer-key + To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to + encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is + needed. For more information, see Using Your Own Keys for Server-Side Encryption + (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm) + + Examples: + | | None + + --sse-customer-key-sha256 + If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption + key. This value is used to check the integrity of the encryption key. see Using Your Own Keys for + Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm). 
+ + Examples: + | | None + + --sse-kms-key-id + if using your own master key in vault, this header specifies the + OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call + the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. + Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. + + Examples: + | | None + + --sse-customer-algorithm + If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. + Object Storage supports "AES256" as the encryption algorithm. For more information, see + Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm). + + Examples: + | | None + | AES256 | AES256 + + --description + Description of the remote. + + +OPTIONS: + --compartment value Object storage compartment OCID [$COMPARTMENT] + --endpoint value Endpoint for Object storage API. [$ENDPOINT] + --help, -h show help + --namespace value Object storage namespace [$NAMESPACE] + --region value Object storage Region [$REGION] + + Advanced + + --attempt-resume-upload If true attempt to resume previously started multipart upload for the object. (default: false) [$ATTEMPT_RESUME_UPLOAD] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --copy-timeout value Timeout for copy. (default: "1m0s") [$COPY_TIMEOUT] + --description value Description of the remote. [$DESCRIPTION] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --sse-customer-algorithm value If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to [$SSE_CUSTOMER_KEY] + --sse-customer-key-file value To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated [$SSE_CUSTOMER_KEY_FILE] + --sse-customer-key-sha256 value If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption [$SSE_CUSTOMER_KEY_SHA256] + --sse-kms-key-id value if using your own master key in vault, this header specifies the [$SSE_KMS_KEY_ID] + --storage-tier value The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default: "Standard") [$STORAGE_TIER] + --upload-concurrency value Concurrency for multipart uploads. (default: 10) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. 
+ --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/opendrive.md b/docs/en/cli-reference/storage/update/opendrive.md index 8baf395f..0344ccbe 100644 --- a/docs/en/cli-reference/storage/update/opendrive.md +++ b/docs/en/cli-reference/storage/update/opendrive.md @@ -26,6 +26,9 @@ DESCRIPTION: Note that these chunks are buffered in memory so increasing them will increase memory use. + --description + Description of the remote. + OPTIONS: --help, -h show help @@ -34,8 +37,9 @@ OPTIONS: Advanced - --chunk-size value Files will be uploaded in chunks this size. (default: "10Mi") [$CHUNK_SIZE] - --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot") [$ENCODING] + --chunk-size value Files will be uploaded in chunks this size. (default: "10Mi") [$CHUNK_SIZE] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot") [$ENCODING] Client Config @@ -50,7 +54,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/pcloud.md b/docs/en/cli-reference/storage/update/pcloud.md index 3113a86e..a6620394 100644 --- a/docs/en/cli-reference/storage/update/pcloud.md +++ b/docs/en/cli-reference/storage/update/pcloud.md @@ -62,6 +62,9 @@ DESCRIPTION: --password Your pcloud password. + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -71,6 +74,7 @@ OPTIONS: Advanced --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] --hostname value Hostname to connect to. (default: "api.pcloud.com") [$HOSTNAME] --password value Your pcloud password. [$PASSWORD] @@ -92,7 +96,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/premiumizeme.md b/docs/en/cli-reference/storage/update/premiumizeme.md index 4ae1b009..ba49fb4c 100644 --- a/docs/en/cli-reference/storage/update/premiumizeme.md +++ b/docs/en/cli-reference/storage/update/premiumizeme.md @@ -9,6 +9,29 @@ USAGE: singularity storage update premiumizeme [command options] DESCRIPTION: + --client-id + OAuth Client Id. + + Leave blank normally. + + --client-secret + OAuth Client Secret. + + Leave blank normally. + + --token + OAuth Access Token as a JSON blob. + + --auth-url + Auth server URL. + + Leave blank to use the provider defaults. + + --token-url + Token server url. + + Leave blank to use the provider defaults. + --api-key API Key. @@ -20,14 +43,23 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: - --api-key value API Key. [$API_KEY] - --help, -h show help + --api-key value API Key. [$API_KEY] + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] + --help, -h show help Advanced - --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config @@ -42,7 +74,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/putio.md b/docs/en/cli-reference/storage/update/putio.md index da3c5bc1..9e2b9a2d 100644 --- a/docs/en/cli-reference/storage/update/putio.md +++ b/docs/en/cli-reference/storage/update/putio.md @@ -9,18 +9,50 @@ USAGE: singularity storage update putio [command options] DESCRIPTION: + --client-id + OAuth Client Id. + + Leave blank normally. + + --client-secret + OAuth Client Secret. + + Leave blank normally. + + --token + OAuth Access Token as a JSON blob. + + --auth-url + Auth server URL. + + Leave blank to use the provider defaults. + + --token-url + Token server url. + + Leave blank to use the provider defaults. + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: - --help, -h show help + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] + --help, -h show help Advanced - --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config @@ -35,7 +67,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/qingstor.md b/docs/en/cli-reference/storage/update/qingstor.md index abac9824..90fa3824 100644 --- a/docs/en/cli-reference/storage/update/qingstor.md +++ b/docs/en/cli-reference/storage/update/qingstor.md @@ -85,6 +85,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --access-key-id value QingStor Access Key ID. [$ACCESS_KEY_ID] @@ -98,6 +101,7 @@ OPTIONS: --chunk-size value Chunk size to use for uploading. (default: "4Mi") [$CHUNK_SIZE] --connection-retries value Number of connection retries. (default: 3) [$CONNECTION_RETRIES] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Ctl,InvalidUtf8") [$ENCODING] --upload-concurrency value Concurrency for multipart uploads. (default: 1) [$UPLOAD_CONCURRENCY] --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] @@ -115,7 +119,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/README.md b/docs/en/cli-reference/storage/update/s3/README.md index bf2ec7a4..78f83e92 100644 --- a/docs/en/cli-reference/storage/update/s3/README.md +++ b/docs/en/cli-reference/storage/update/s3/README.md @@ -1,9 +1,9 @@ -# Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi +# Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others {% code fullWidth="true" %} ``` NAME: - singularity storage update s3 - Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi + singularity storage update s3 - Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others USAGE: singularity storage update s3 command [command options] @@ -17,21 +17,28 @@ COMMANDS: cloudflare Cloudflare R2 Storage digitalocean DigitalOcean Spaces dreamhost Dreamhost DreamObjects + gcs Google Cloud Storage huaweiobs Huawei Object Storage Service ibmcos IBM COS S3 idrive IDrive e2 ionos IONOS Cloud + leviia Leviia Object Storage liara Liara Object Storage + linode Linode Object Storage lyvecloud Seagate Lyve Cloud + magalu Magalu Object Storage minio Minio Object Storage netease Netease Object Storage (NOS) other Any other S3 compatible provider + petabox Petabox Object Storage qiniu Qiniu Object Storage (Kodo) rackcorp RackCorp Object Storage + rclone Rclone S3 Server scaleway Scaleway Object Storage seaweedfs SeaweedFS S3 stackpath StackPath Object Storage storj Storj (S3 Compatible Gateway) + synology Synology C2 Object Storage tencentcos Tencent Cloud Object Storage (COS) wasabi Wasabi Object Storage help, h Shows a list of commands or help for one command diff --git a/docs/en/cli-reference/storage/update/s3/alibaba.md b/docs/en/cli-reference/storage/update/s3/alibaba.md index 3895d4c6..bfae28c5 100644 --- a/docs/en/cli-reference/storage/update/s3/alibaba.md +++ b/docs/en/cli-reference/storage/update/s3/alibaba.md @@ -196,10 +196,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. 
If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -216,6 +216,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -225,6 +229,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -309,13 +318,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -333,12 +339,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -366,6 +389,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -400,9 +434,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. 
+ + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -415,36 +522,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. 
(default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. 
(no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -459,7 +575,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/arvancloud.md b/docs/en/cli-reference/storage/update/s3/arvancloud.md index 8bd09f71..48da5f74 100644 --- a/docs/en/cli-reference/storage/update/s3/arvancloud.md +++ b/docs/en/cli-reference/storage/update/s3/arvancloud.md @@ -32,9 +32,9 @@ DESCRIPTION: Endpoint for Arvan Cloud Object Storage (AOS) API. Examples: - | s3.ir-thr-at1.arvanstorage.com | The default endpoint - a good choice if you are unsure. 
- | | Tehran Iran (Asiatech) - | s3.ir-tbz-sh1.arvanstorage.com | Tabriz Iran (Shahriar) + | s3.ir-thr-at1.arvanstorage.ir | The default endpoint - a good choice if you are unsure. + | | Tehran Iran (Simin) + | s3.ir-tbz-sh1.arvanstorage.ir | Tabriz Iran (Shahriar) --location-constraint Location constraint - must match endpoint. @@ -42,7 +42,7 @@ DESCRIPTION: Used when creating buckets only. Examples: - | ir-thr-at1 | Tehran Iran (Asiatech) + | ir-thr-at1 | Tehran Iran (Simin) | ir-tbz-sh1 | Tabriz Iran (Shahriar) --acl @@ -180,10 +180,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -200,6 +200,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -209,6 +213,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -293,13 +302,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -317,12 +323,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -350,6 +373,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. 
+ + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -384,9 +418,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -400,36 +507,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. 
(default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. 
(default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -444,7 +560,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/aws.md b/docs/en/cli-reference/storage/update/s3/aws.md index 925f9d90..af0af55b 100644 --- a/docs/en/cli-reference/storage/update/s3/aws.md +++ b/docs/en/cli-reference/storage/update/s3/aws.md @@ -71,6 +71,8 @@ DESCRIPTION: | | Needs location constraint ap-east-1. | sa-east-1 | South America (Sao Paulo) Region. | | Needs location constraint sa-east-1. + | il-central-1 | Israel (Tel Aviv) Region. + | | Needs location constraint il-central-1. | me-south-1 | Middle East (Bahrain) Region. | | Needs location constraint me-south-1. | af-south-1 | Africa (Cape Town) Region. @@ -114,6 +116,7 @@ DESCRIPTION: | ap-south-1 | Asia Pacific (Mumbai) Region | ap-east-1 | Asia Pacific (Hong Kong) Region | sa-east-1 | South America (Sao Paulo) Region + | il-central-1 | Israel (Tel Aviv) Region | me-south-1 | Middle East (Bahrain) Region | af-south-1 | Africa (Cape Town) Region | cn-north-1 | China (Beijing) Region @@ -313,10 +316,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -333,6 +336,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -342,6 +349,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --use-accelerate-endpoint If true use the AWS S3 accelerated endpoint. @@ -439,13 +451,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -463,12 +472,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. 
+ --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -496,6 +522,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -530,14 +567,87 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata --sts-endpoint - Endpoint for STS. + Endpoint for STS (deprecated). Leave blank if using AWS to use the default endpoint for the region. + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. 
You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -554,44 +664,53 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --requester-pays Enables requester pays option when interacting with S3 bucket. (default: false) [$REQUESTER_PAYS] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] - --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] - --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] - --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] - --sts-endpoint value Endpoint for STS. [$STS_ENDPOINT] - --upload-concurrency value Concurrency for multipart uploads. 
(default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-accelerate-endpoint If true use the AWS S3 accelerated endpoint. (default: false) [$USE_ACCELERATE_ENDPOINT] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. (default: false) [$LEAVE_PARTS_ON_ERROR] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --requester-pays Enables requester pays option when interacting with S3 bucket. 
(default: false) [$REQUESTER_PAYS] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] + --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] + --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] + --sts-endpoint value Endpoint for STS (deprecated). [$STS_ENDPOINT] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accelerate-endpoint If true use the AWS S3 accelerated endpoint. (default: false) [$USE_ACCELERATE_ENDPOINT] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -606,7 +725,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/ceph.md b/docs/en/cli-reference/storage/update/s3/ceph.md index 08bd6520..e0d53575 100644 --- a/docs/en/cli-reference/storage/update/s3/ceph.md +++ b/docs/en/cli-reference/storage/update/s3/ceph.md @@ -224,10 +224,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. 
This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -244,6 +244,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -253,6 +257,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -337,13 +346,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -361,12 +367,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -394,6 +417,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -428,9 +462,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. 
+ + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -446,40 +553,49 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. 
(default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] - --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] - --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] - --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. 
(default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] + --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] + --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. 
(default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -494,7 +610,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/chinamobile.md b/docs/en/cli-reference/storage/update/s3/chinamobile.md index e5d656c0..ebe1a5cf 100644 --- a/docs/en/cli-reference/storage/update/s3/chinamobile.md +++ b/docs/en/cli-reference/storage/update/s3/chinamobile.md @@ -278,10 +278,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -298,6 +298,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -307,6 +311,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -391,13 +400,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -415,12 +421,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. 
+ --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -448,6 +471,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -482,9 +516,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. 
+ OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -499,40 +606,49 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] - --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] - --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] - --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. 
(default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] + --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] + --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] + --upload-concurrency value Concurrency for multipart uploads and copies. 
(default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -547,7 +663,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/cloudflare.md b/docs/en/cli-reference/storage/update/s3/cloudflare.md index a6bc0d3d..b55a44db 100644 --- a/docs/en/cli-reference/storage/update/s3/cloudflare.md +++ b/docs/en/cli-reference/storage/update/s3/cloudflare.md @@ -154,10 +154,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -174,6 +174,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -183,6 +187,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -267,13 +276,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. 
--memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -291,12 +297,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -324,6 +347,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -358,9 +392,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). 
+ + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -372,36 +479,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. 
[$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. 
(default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -416,7 +532,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/digitalocean.md b/docs/en/cli-reference/storage/update/s3/digitalocean.md index 3479786d..90dec587 100644 --- a/docs/en/cli-reference/storage/update/s3/digitalocean.md +++ b/docs/en/cli-reference/storage/update/s3/digitalocean.md @@ -186,10 +186,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -206,6 +206,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -215,6 +219,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -299,13 +308,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. 
--memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -323,12 +329,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -356,6 +379,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -390,9 +424,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). 
+ + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -406,36 +513,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. 
[$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. 
(default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -450,7 +566,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/dreamhost.md b/docs/en/cli-reference/storage/update/s3/dreamhost.md index 8549180d..b433bf91 100644 --- a/docs/en/cli-reference/storage/update/s3/dreamhost.md +++ b/docs/en/cli-reference/storage/update/s3/dreamhost.md @@ -181,10 +181,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -201,6 +201,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -210,6 +214,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -294,13 +303,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. 
--memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -318,12 +324,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -351,6 +374,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -385,9 +419,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). 
+ + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -401,36 +508,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. 
[$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. 
(default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -445,7 +561,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/gcs.md b/docs/en/cli-reference/storage/update/s3/gcs.md new file mode 100644 index 00000000..fba04eb7 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/gcs.md @@ -0,0 +1,574 @@ +# Google Cloud Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 gcs - Google Cloud Storage + +USAGE: + singularity storage update s3 gcs [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | | Use this if unsure. + | | Will use v4 signatures and an empty region. + | other-v2-signature | Use this only if v4 signatures don't work. + | | E.g. pre Jewel/v10 CEPH. + + --endpoint + Endpoint for Google Cloud Storage. + + Examples: + | https://storage.googleapis.com | Google Cloud Storage endpoint + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. 
+ + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. 
+ + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. 
+ + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. 
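To make the --directory-markers behaviour above concrete (the key shown is purely illustrative): creating an otherwise empty folder such as `reports/2024/` results in a zero-byte object whose key is literally `reports/2024/`, and it is that trailing-slash object that keeps the empty folder visible in listings on bucket-based remotes.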
+ + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. 
+ + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for Google Cloud Storage. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. 
(default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". 
To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/huaweiobs.md b/docs/en/cli-reference/storage/update/s3/huaweiobs.md index a2ad9423..cb602aa9 100644 --- a/docs/en/cli-reference/storage/update/s3/huaweiobs.md +++ b/docs/en/cli-reference/storage/update/s3/huaweiobs.md @@ -198,10 +198,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -218,6 +218,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -227,6 +231,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -311,13 +320,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. 
@@ -335,12 +341,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -368,6 +391,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -402,9 +436,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -417,36 +524,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. 
(default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). 
(default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -461,7 +577,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/ibmcos.md b/docs/en/cli-reference/storage/update/s3/ibmcos.md index d63ad75e..f7785224 100644 --- a/docs/en/cli-reference/storage/update/s3/ibmcos.md +++ b/docs/en/cli-reference/storage/update/s3/ibmcos.md @@ -291,10 +291,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -311,6 +311,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -320,6 +324,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -404,13 +413,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. 
(no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -428,12 +434,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -461,6 +484,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -495,9 +529,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. 
+ This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -511,36 +618,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -555,7 +671,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/idrive.md b/docs/en/cli-reference/storage/update/s3/idrive.md index fb115871..98a08f53 100644 --- a/docs/en/cli-reference/storage/update/s3/idrive.md +++ b/docs/en/cli-reference/storage/update/s3/idrive.md @@ -157,10 +157,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -177,6 +177,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -186,6 +190,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -270,13 +279,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. 
+ How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -294,12 +300,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -327,6 +350,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -361,9 +395,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -374,36 +481,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -418,7 +534,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/ionos.md b/docs/en/cli-reference/storage/update/s3/ionos.md index c020fc3c..d67fe363 100644 --- a/docs/en/cli-reference/storage/update/s3/ionos.md +++ b/docs/en/cli-reference/storage/update/s3/ionos.md @@ -176,10 +176,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -196,6 +196,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -205,6 +209,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -289,13 +298,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. 
+ How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -313,12 +319,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -346,6 +369,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -380,9 +414,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -395,36 +502,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -439,7 +555,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/leviia.md b/docs/en/cli-reference/storage/update/s3/leviia.md new file mode 100644 index 00000000..801667d1 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/leviia.md @@ -0,0 +1,567 @@ +# Leviia Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 leviia - Leviia Object Storage + +USAGE: + singularity storage update s3 leviia [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | | Use this if unsure. + | | Will use v4 signatures and an empty region. + | other-v2-signature | Use this only if v4 signatures don't work. + | | E.g. pre Jewel/v10 CEPH. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. 
+ + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. 
+ + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. 
+ + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. 
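+
+      A hedged illustration (not part of the generated rclone help): the
+      --versions flag above and the --version-at / --version-deleted flags
+      below are typically combined when inspecting bucket history. In the
+      sketch, "my-leviia" is an assumed placeholder for however the target
+      storage is identified, and "100d" is one of the duration formats
+      described under --version-at below.
+
+          singularity storage update s3 leviia --versions --version-at "100d" my-leviia
+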
+ + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. 
Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. 
(default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/liara.md b/docs/en/cli-reference/storage/update/s3/liara.md index b9911e69..06034a8f 100644 --- a/docs/en/cli-reference/storage/update/s3/liara.md +++ b/docs/en/cli-reference/storage/update/s3/liara.md @@ -170,10 +170,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -190,6 +190,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -199,6 +203,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -283,13 +292,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -307,12 +313,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. 
+ + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -340,6 +363,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -374,9 +408,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -389,36 +496,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. 
(default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. 
(default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] Client Config @@ -433,7 +549,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/linode.md b/docs/en/cli-reference/storage/update/s3/linode.md new file mode 100644 index 00000000..645da4ce --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/linode.md @@ -0,0 +1,565 @@ +# Linode Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 linode - Linode Object Storage + +USAGE: + singularity storage update s3 linode [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --endpoint + Endpoint for Linode Object Storage API. + + Examples: + | us-southeast-1.linodeobjects.com | Atlanta, GA (USA), us-southeast-1 + | us-ord-1.linodeobjects.com | Chicago, IL (USA), us-ord-1 + | eu-central-1.linodeobjects.com | Frankfurt (Germany), eu-central-1 + | it-mil-1.linodeobjects.com | Milan (Italy), it-mil-1 + | us-east-1.linodeobjects.com | Newark, NJ (USA), us-east-1 + | fr-par-1.linodeobjects.com | Paris (France), fr-par-1 + | us-sea-1.linodeobjects.com | Seattle, WA (USA), us-sea-1 + | ap-south-1.linodeobjects.com | Singapore ap-south-1 + | se-sto-1.linodeobjects.com | Stockholm (Sweden), se-sto-1 + | us-iad-1.linodeobjects.com | Washington, DC, (USA), us-iad-1 + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. 
+ | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. 
+ + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. 
+ + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. 
+ + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. 
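+
+   # Illustrative note (not part of the generated help text): the tri-state
+   # options above, such as --use-multipart-etag, --might-gzip and
+   # --use-multipart-uploads, accept "true" or "false", or can be left at
+   # their default of "unset" to use the provider's setting. For a provider
+   # that mishandles multipart uploads you might append, for example:
+   #
+   #   --use-multipart-uploads false --use-multipart-etag false
+   #
+   # to this provider's "singularity storage update s3 ..." command; any
+   # storage identifier the command requires is omitted here.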
+ + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for Linode Object Storage API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. 
[$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/lyvecloud.md b/docs/en/cli-reference/storage/update/s3/lyvecloud.md index 7980a581..54afbc6a 100644 --- a/docs/en/cli-reference/storage/update/s3/lyvecloud.md +++ b/docs/en/cli-reference/storage/update/s3/lyvecloud.md @@ -183,10 +183,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -203,6 +203,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -212,6 +216,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -296,13 +305,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -320,12 +326,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. 
+ + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -353,6 +376,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -387,9 +421,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -403,36 +510,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. 
(default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. 
(default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] Client Config @@ -447,7 +563,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/magalu.md b/docs/en/cli-reference/storage/update/s3/magalu.md new file mode 100644 index 00000000..f324f6de --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/magalu.md @@ -0,0 +1,566 @@ +# Magalu Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 magalu - Magalu Object Storage + +USAGE: + singularity storage update s3 magalu [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + Examples: + | br-se1.magaluobjects.com | Magalu BR Southeast 1 endpoint + | br-ne1.magaluobjects.com | Magalu BR Northeast 1 endpoint + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --storage-class + The storage class to use when storing new objects in Magalu. + + Examples: + | STANDARD | Standard storage class + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. 
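+
+      # Illustrative note (not part of the generated help text): with the
+      # documented default cutoff of "200Mi", any file larger than 200 MiB is
+      # uploaded in chunks of chunk_size. To switch to chunked upload only
+      # above 1 GiB you might pass, for example:
+      #
+      #   --upload-cutoff 1Gi
+      #
+      # using the same size suffixes as the defaults (Mi, Gi); the value must
+      # stay within the documented 0 to 5 GiB range.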
+ + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. 
+ + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. 
+ + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. 
+ + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. 
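+
+   # Illustrative example (not part of the generated help text). With the
+   # default 5Mi chunk size and the 10,000 part limit described above, the
+   # largest file that can be streamed with unknown size is about
+   # 5 MiB x 10,000 = 48.8 GiB (the 48 GiB figure quoted under --chunk-size);
+   # raising the chunk size lifts that ceiling, e.g. 64 MiB x 10,000 = 625 GiB.
+   # A hypothetical invocation combining flags documented above (any storage
+   # identifier required by the update command is omitted):
+   #
+   #   singularity storage update s3 magalu \
+   #       --chunk-size 64Mi \
+   #       --upload-concurrency 8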
+ + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + --storage-class value The storage class to use when storing new objects in Magalu. [$STORAGE_CLASS] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/minio.md b/docs/en/cli-reference/storage/update/s3/minio.md index 7dce5925..8104f41e 100644 --- a/docs/en/cli-reference/storage/update/s3/minio.md +++ b/docs/en/cli-reference/storage/update/s3/minio.md @@ -224,10 +224,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. 
This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -244,6 +244,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -253,6 +257,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -337,13 +346,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -361,12 +367,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -394,6 +417,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -428,9 +462,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. 
+ + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -446,40 +553,49 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. 
(default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] - --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] - --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] - --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. 
(default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --sse-customer-algorithm value If using SSE-C, the server-side encryption algorithm used when storing this object in S3. [$SSE_CUSTOMER_ALGORITHM] + --sse-customer-key value To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY] + --sse-customer-key-base64 value If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data. [$SSE_CUSTOMER_KEY_BASE64] + --sse-customer-key-md5 value If using SSE-C you may provide the secret encryption key MD5 checksum (optional). [$SSE_CUSTOMER_KEY_MD5] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. 
(default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -494,7 +610,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/netease.md b/docs/en/cli-reference/storage/update/s3/netease.md index 6d87ff4a..73d5dfe9 100644 --- a/docs/en/cli-reference/storage/update/s3/netease.md +++ b/docs/en/cli-reference/storage/update/s3/netease.md @@ -178,10 +178,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -198,6 +198,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -207,6 +211,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -291,13 +300,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -315,12 +321,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. 
+ --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -348,6 +371,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -382,9 +416,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. 
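+   As a quick illustration of the options documented above, a minimal sketch of an update command follows. The storage name `my-netease-cos`, the positional name argument, and the chosen values are assumptions for the example only; each flag can equivalently be supplied through the environment variable listed beside it in the OPTIONS table.
+
+   ```
+   # Sketch only: raise multipart concurrency and keep empty directories visible.
+   # "my-netease-cos" is an assumed storage name/ID.
+   singularity storage update s3 netease my-netease-cos \
+       --upload-concurrency 8 \
+       --directory-markers
+
+   # The same settings supplied via the documented environment variables.
+   UPLOAD_CONCURRENCY=8 DIRECTORY_MARKERS=true \
+       singularity storage update s3 netease my-netease-cos
+   ```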
+ OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -398,36 +505,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. 
[$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. 
(default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -442,7 +558,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/other.md b/docs/en/cli-reference/storage/update/s3/other.md index c9769017..1e276976 100644 --- a/docs/en/cli-reference/storage/update/s3/other.md +++ b/docs/en/cli-reference/storage/update/s3/other.md @@ -178,10 +178,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -198,6 +198,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -207,6 +211,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -291,13 +300,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -315,12 +321,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. 
+ --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -348,6 +371,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -382,9 +416,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. 
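+   For illustration, a minimal sketch using a few of the options above against a generic S3-compatible endpoint. The storage identifier `my-s3`, the positional name argument, and the chosen values are assumptions for the example, not defaults.
+
+   ```
+   # Sketch only: enable SDK request/response logging while debugging, and tell
+   # rclone this provider does not return AlreadyOwnedByYou on bucket creation.
+   # "my-s3" is an assumed storage name/ID; run with -vv to see the SDK debug
+   # output, as noted in the --sdk-log-mode description above.
+   singularity storage update s3 other my-s3 \
+       --sdk-log-mode Request,Response \
+       --use-already-exists false
+   ```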
+ OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -398,36 +505,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. 
[$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. 
(default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -442,7 +558,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/petabox.md b/docs/en/cli-reference/storage/update/s3/petabox.md new file mode 100644 index 00000000..34574aa4 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/petabox.md @@ -0,0 +1,575 @@ +# Petabox Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 petabox - Petabox Object Storage + +USAGE: + singularity storage update s3 petabox [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region where your bucket will be created and your data stored. + + + Examples: + | us-east-1 | US East (N. Virginia) + | eu-central-1 | Europe (Frankfurt) + | ap-southeast-1 | Asia Pacific (Singapore) + | me-south-1 | Middle East (Bahrain) + | sa-east-1 | South America (São Paulo) + + --endpoint + Endpoint for Petabox S3 Object Storage. + + Specify the endpoint from the same region. + + Examples: + | s3.petabox.io | US East (N. Virginia) + | s3.us-east-1.petabox.io | US East (N. Virginia) + | s3.eu-central-1.petabox.io | Europe (Frankfurt) + | s3.ap-southeast-1.petabox.io | Asia Pacific (Singapore) + | s3.me-south-1.petabox.io | Middle East (Bahrain) + | s3.sa-east-1.petabox.io | South America (São Paulo) + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. 
+ | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. 
+ + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. 
+ + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. + + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. 
+ + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for Petabox S3 Object Storage. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --region value Region where your bucket will be created and your data stored. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. 
[$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/qiniu.md b/docs/en/cli-reference/storage/update/s3/qiniu.md index 2237ef27..9e01d2f2 100644 --- a/docs/en/cli-reference/storage/update/s3/qiniu.md +++ b/docs/en/cli-reference/storage/update/s3/qiniu.md @@ -212,10 +212,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -232,6 +232,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -241,6 +245,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -325,13 +334,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -349,12 +355,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. 
+ + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -382,6 +405,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -416,9 +450,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -433,36 +540,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. 
(default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. 
(default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] Client Config @@ -477,7 +593,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/rackcorp.md b/docs/en/cli-reference/storage/update/s3/rackcorp.md index 7c3624c9..0729a33a 100644 --- a/docs/en/cli-reference/storage/update/s3/rackcorp.md +++ b/docs/en/cli-reference/storage/update/s3/rackcorp.md @@ -231,10 +231,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -251,6 +251,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -260,6 +264,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -344,13 +353,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -368,12 +374,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. 
+ + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -401,6 +424,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -435,9 +469,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -451,36 +558,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. 
(default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. 
(default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] Client Config @@ -495,7 +611,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/rclone.md b/docs/en/cli-reference/storage/update/s3/rclone.md new file mode 100644 index 00000000..a95e51d9 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/rclone.md @@ -0,0 +1,573 @@ +# Rclone S3 Server + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 rclone - Rclone S3 Server + +USAGE: + singularity storage update s3 rclone [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region to connect to. + + Leave blank if you are using an S3 clone and you don't have a region. + + Examples: + | | Use this if unsure. + | | Will use v4 signatures and an empty region. + | other-v2-signature | Use this only if v4 signatures don't work. + | | E.g. pre Jewel/v10 CEPH. + + --endpoint + Endpoint for S3 API. + + Required when using an S3 clone. + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + --acl + Canned ACL used when creating buckets and storing or copying objects. + + This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when server-side copying objects as S3 + doesn't copy the ACL from the source but rather writes a fresh one. + + If the acl is an empty string then no X-Amz-Acl: header is added and + the default (private) will be used. + + + --bucket-acl + Canned ACL used when creating buckets. + + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied when only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. 
+ + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting more deviating from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. 
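+
+      As an illustrative sketch (the values are examples rather than
+      recommendations, and any storage-selection arguments your deployment
+      requires are assumed to be supplied as well), these multipart settings
+      can be tuned together when updating the connection:
+
+          # switch to multipart above 200 MiB, upload 64 MiB chunks,
+          # and send 8 chunks of the same file concurrently
+          singularity storage update s3 rclone \
+              --upload-cutoff 200Mi \
+              --chunk-size 64Mi \
+              --upload-concurrency 8
+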
+ + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. + + This can be useful when trying to minimise the number of transactions + rclone does if you know the bucket exists already. + + It can also be needed if the user you are using does not have bucket + creation permissions. Before v1.52.0 this would have passed silently + due to a bug. + + + --no-head + If set, don't HEAD uploaded objects to check integrity. + + This can be useful when trying to minimise the number of transactions + rclone does. + + Setting it means that if rclone receives a 200 OK message after + uploading an object with PUT then it will assume that it got uploaded + properly. + + In particular it will assume: + + - the metadata, including modtime, storage class and content type was as uploaded + - the size was as uploaded + + It reads the following items from the response for a single part PUT: + + - the MD5SUM + - The uploaded date + + For multipart uploads these items aren't read. + + If an source object of unknown length is uploaded then rclone **will** do a + HEAD request. 
+ + Setting this flag increases the chance for undetected upload failures, + in particular an incorrect size, so it isn't recommended for normal + operation. In practice the chance of an undetected upload failure is + very small even with this flag. + + + --no-head-object + If set, do not do HEAD before GET when getting objects. + + --encoding + The encoding for the backend. + + See the [encoding section in the overview](/overview/#encoding) for more info. + + --memory-pool-flush-time + How often internal memory buffer pools will be flushed. (no longer used) + + --memory-pool-use-mmap + Whether to use mmap buffers in internal memory pool. (no longer used) + + --disable-http2 + Disable usage of http2 for S3 backends. + + There is currently an unsolved issue with the s3 (specifically minio) backend + and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + disabled here. When the issue is solved this flag will be removed. + + See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + + + + --download-url + Custom endpoint for downloads. + This is usually set to a CloudFront CDN URL as AWS S3 offers + cheaper egress for data downloaded through the CloudFront network. + + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + + --use-multipart-etag + Whether to use ETag in multipart uploads for verification + + This should be true, false or left unset to use the default for the provider. + + + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + + --use-presigned-request + Whether to use a presigned request or PutObject for single part uploads + + If this is false rclone will use PutObject from the AWS SDK to upload + an object. + + Versions of rclone < 1.59 use presigned requests to upload a single + part object and setting this flag to true will re-enable that + functionality. This shouldn't be necessary except in exceptional + circumstances or for testing. + + + --versions + Include old versions in directory listings. + + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. 
+ + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. 
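+
+      For example, a minimal sketch of enabling SDK debugging while updating
+      this backend (the mode list and description text are placeholders; per
+      the note above, debug-level logging must also be enabled to see the
+      output):
+
+          # log SDK requests and responses, and record a description
+          singularity storage update s3 rclone \
+              --sdk-log-mode "Request,Response" \
+              --description "local rclone serve s3 endpoint"
+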
+ + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --acl value Canned ACL used when creating buckets and storing or copying objects. [$ACL] + --endpoint value Endpoint for S3 API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region to connect to. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/scaleway.md b/docs/en/cli-reference/storage/update/s3/scaleway.md index 4180e50c..630ac70a 100644 --- a/docs/en/cli-reference/storage/update/s3/scaleway.md +++ b/docs/en/cli-reference/storage/update/s3/scaleway.md @@ -85,11 +85,16 @@ DESCRIPTION: The storage class to use when storing new objects in S3. Examples: - | | Default. - | STANDARD | The Standard class for any upload. - | | Suitable for on-demand content like streaming or CDN. - | GLACIER | Archived storage. - | | Prices are lower, but it needs to be restored first to be accessed. + | | Default. + | STANDARD | The Standard class for any upload. + | | Suitable for on-demand content like streaming or CDN. + | | Available in all regions. + | GLACIER | Archived storage. + | | Prices are lower, but it needs to be restored first to be accessed. + | | Available in FR-PAR and NL-AMS regions. + | ONEZONE_IA | One Zone - Infrequent Access. + | | A good choice for storing secondary backup copies or easily re-creatable data. + | | Available in the FR-PAR region only. --upload-cutoff Cutoff for switching to chunked upload. @@ -183,10 +188,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -203,6 +208,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -212,6 +221,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -296,13 +310,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. 
(no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -320,12 +331,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -353,6 +381,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -387,9 +426,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. 
+ This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -403,36 +515,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -447,7 +568,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/seaweedfs.md b/docs/en/cli-reference/storage/update/s3/seaweedfs.md index ea0f8ef6..af0560b4 100644 --- a/docs/en/cli-reference/storage/update/s3/seaweedfs.md +++ b/docs/en/cli-reference/storage/update/s3/seaweedfs.md @@ -181,10 +181,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -201,6 +201,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -210,6 +214,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -294,13 +303,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. 
- This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -318,12 +324,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -351,6 +374,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -385,9 +419,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -401,36 +508,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -445,7 +561,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/stackpath.md b/docs/en/cli-reference/storage/update/s3/stackpath.md index d4ce4ff5..91819dfb 100644 --- a/docs/en/cli-reference/storage/update/s3/stackpath.md +++ b/docs/en/cli-reference/storage/update/s3/stackpath.md @@ -176,10 +176,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -196,6 +196,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -205,6 +209,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -289,13 +298,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. 
- This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -313,12 +319,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -346,6 +369,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -380,9 +414,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -395,36 +502,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -439,7 +555,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/storj.md b/docs/en/cli-reference/storage/update/s3/storj.md index 69ae96c0..d367d255 100644 --- a/docs/en/cli-reference/storage/update/s3/storj.md +++ b/docs/en/cli-reference/storage/update/s3/storj.md @@ -149,10 +149,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -169,6 +169,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -178,6 +182,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -262,13 +271,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. 
+ How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -286,12 +292,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. + + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -319,6 +342,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -353,9 +387,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. 
+ + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -366,36 +473,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. 
(default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. 
(default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] Client Config @@ -410,7 +526,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/synology.md b/docs/en/cli-reference/storage/update/s3/synology.md new file mode 100644 index 00000000..d8e3e664 --- /dev/null +++ b/docs/en/cli-reference/storage/update/s3/synology.md @@ -0,0 +1,563 @@ +# Synology C2 Object Storage + +{% code fullWidth="true" %} +``` +NAME: + singularity storage update s3 synology - Synology C2 Object Storage + +USAGE: + singularity storage update s3 synology [command options] + +DESCRIPTION: + --env-auth + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + + Only applies if access_key_id and secret_access_key is blank. + + Examples: + | false | Enter AWS credentials in the next step. + | true | Get AWS credentials from the environment (env vars or IAM). + + --access-key-id + AWS Access Key ID. + + Leave blank for anonymous access or runtime credentials. + + --secret-access-key + AWS Secret Access Key (password). + + Leave blank for anonymous access or runtime credentials. + + --region + Region where your data stored. + + + Examples: + | eu-001 | Europe Region 1 + | eu-002 | Europe Region 2 + | us-001 | US Region 1 + | us-002 | US Region 2 + | tw-001 | Asia (Taiwan) + + --endpoint + Endpoint for Synology C2 Object Storage API. + + Examples: + | eu-001.s3.synologyc2.net | EU Endpoint 1 + | eu-002.s3.synologyc2.net | EU Endpoint 2 + | us-001.s3.synologyc2.net | US Endpoint 1 + | us-002.s3.synologyc2.net | US Endpoint 2 + | tw-001.s3.synologyc2.net | TW Endpoint 1 + + --location-constraint + Location constraint - must be set to match the Region. + + Leave blank if not sure. Used when creating buckets only. + + --bucket-acl + Canned ACL used when creating buckets. 
+ + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + + Note that this ACL is applied only when creating buckets. If it + isn't set then "acl" is used instead. + + If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: + header is added and the default (private) will be used. + + + Examples: + | private | Owner gets FULL_CONTROL. + | | No one else has access rights (default). + | public-read | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ access. + | public-read-write | Owner gets FULL_CONTROL. + | | The AllUsers group gets READ and WRITE access. + | | Granting this on a bucket is generally not recommended. + | authenticated-read | Owner gets FULL_CONTROL. + | | The AuthenticatedUsers group gets READ access. + + --upload-cutoff + Cutoff for switching to chunked upload. + + Any files larger than this will be uploaded in chunks of chunk_size. + The minimum is 0 and the maximum is 5 GiB. + + --chunk-size + Chunk size to use for uploading. + + When uploading files larger than upload_cutoff or files with unknown + size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + photos or google docs) they will be uploaded as multipart uploads + using this chunk size. + + Note that "--s3-upload-concurrency" chunks of this size are buffered + in memory per transfer. + + If you are transferring large files over high-speed links and you have + enough memory, then increasing this will speed up the transfers. + + Rclone will automatically increase the chunk size when uploading a + large file of known size to stay below the 10,000 chunks limit. + + Files of unknown size are uploaded with the configured + chunk_size. Since the default chunk size is 5 MiB and there can be at + most 10,000 chunks, this means that by default the maximum size of + a file you can stream upload is 48 GiB. If you wish to stream upload + larger files then you will need to increase chunk_size. + + Increasing the chunk size decreases the accuracy of the progress + statistics displayed with "-P" flag. Rclone treats a chunk as sent when + it's buffered by the AWS SDK, when in fact it may still be uploading. + A bigger chunk size means a bigger AWS SDK buffer and progress + reporting that deviates more from the truth. + + + --max-upload-parts + Maximum number of parts in a multipart upload. + + This option defines the maximum number of multipart chunks to use + when doing a multipart upload. + + This can be useful if a service does not support the AWS S3 + specification of 10,000 chunks. + + Rclone will automatically increase the chunk size when uploading a + large file of a known size to stay below this number of chunks limit. + + + --copy-cutoff + Cutoff for switching to multipart copy. + + Any files larger than this that need to be server-side copied will be + copied in chunks of this size. + + The minimum is 0 and the maximum is 5 GiB. + + --disable-checksum + Don't store MD5 checksum with object metadata. + + Normally rclone will calculate the MD5 checksum of the input before + uploading it so it can add it to metadata on the object. This is great + for data integrity checking but can cause long delays for large files + to start uploading. + + --shared-credentials-file + Path to the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. + + If this variable is empty rclone will look for the + "AWS_SHARED_CREDENTIALS_FILE" env variable.
If the env value is empty + it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" + + + --profile + Profile to use in the shared credentials file. + + If env_auth = true then rclone can use a shared credentials file. This + variable controls which profile is used in that file. + + If empty it will default to the environment variable "AWS_PROFILE" or + "default" if that environment variable is also not set. + + + --session-token + An AWS session token. + + --upload-concurrency + Concurrency for multipart uploads and copies. + + This is the number of chunks of the same file that are uploaded + concurrently for multipart uploads and copies. + + If you are uploading small numbers of large files over high-speed links + and these uploads do not fully utilize your bandwidth, then increasing + this may help to speed up the transfers. + + --force-path-style + If true use path style access if false use virtual hosted style. + + If this is true (the default) then rclone will use path style access, + if false then rclone will use virtual path style. See [the AWS S3 + docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + for more info. + + Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + false - rclone will do this automatically based on the provider + setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + + + --v2-auth + If true use v2 authentication. + + If this is false (the default) then rclone will use v4 authentication. + If it is set then rclone will use v2 authentication. + + Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + + --list-chunk + Size of listing chunk (response list for each ListObject S3 request). + + This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + Most services truncate the response list to 1000 objects even if requested more than that. + In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + In Ceph, this can be increased with the "rgw list buckets max chunk" option. + + + --list-version + Version of ListObjects to use: 1,2 or 0 for auto. + + When S3 originally launched it only provided the ListObjects call to + enumerate objects in a bucket. + + However in May 2016 the ListObjectsV2 call was introduced. This is + much higher performance and should be used if at all possible. + + If set to the default, 0, rclone will guess according to the provider + set which list objects method to call. If it guesses wrong, then it + may be set manually here. + + + --list-url-encode + Whether to url encode listings: true/false/unset + + Some providers support URL encoding listings and where this is + available this is more reliable when using control characters in file + names. If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --no-check-bucket + If set, don't attempt to check the bucket exists or create it. 
+
+ This can be useful when trying to minimise the number of transactions
+ rclone does if you know the bucket exists already.
+
+ It can also be needed if the user you are using does not have bucket
+ creation permissions. Before v1.52.0 this would have passed silently
+ due to a bug.
+
+
+ --no-head
+ If set, don't HEAD uploaded objects to check integrity.
+
+ This can be useful when trying to minimise the number of transactions
+ rclone does.
+
+ Setting it means that if rclone receives a 200 OK message after
+ uploading an object with PUT then it will assume that it got uploaded
+ properly.
+
+ In particular it will assume:
+
+ - the metadata, including modtime, storage class and content type was as uploaded
+ - the size was as uploaded
+
+ It reads the following items from the response for a single part PUT:
+
+ - the MD5SUM
+ - The uploaded date
+
+ For multipart uploads these items aren't read.
+
+ If a source object of unknown length is uploaded then rclone **will** do a
+ HEAD request.
+
+ Setting this flag increases the chance for undetected upload failures,
+ in particular an incorrect size, so it isn't recommended for normal
+ operation. In practice the chance of an undetected upload failure is
+ very small even with this flag.
+
+
+ --no-head-object
+ If set, do not do HEAD before GET when getting objects.
+
+ --encoding
+ The encoding for the backend.
+
+ See the [encoding section in the overview](/overview/#encoding) for more info.
+
+ --memory-pool-flush-time
+ How often internal memory buffer pools will be flushed. (no longer used)
+
+ --memory-pool-use-mmap
+ Whether to use mmap buffers in internal memory pool. (no longer used)
+
+ --disable-http2
+ Disable usage of http2 for S3 backends.
+
+ There is currently an unsolved issue with the s3 (specifically minio) backend
+ and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
+ disabled here. When the issue is solved this flag will be removed.
+
+ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
+
+
+
+ --download-url
+ Custom endpoint for downloads.
+ This is usually set to a CloudFront CDN URL as AWS S3 offers
+ cheaper egress for data downloaded through the CloudFront network.
+
+ --directory-markers
+ Upload an empty object with a trailing slash when a new directory is created
+
+ Empty folders are unsupported for bucket based remotes, this option creates an empty
+ object ending with "/", to persist the folder.
+
+
+ --use-multipart-etag
+ Whether to use ETag in multipart uploads for verification
+
+ This should be true, false or left unset to use the default for the provider.
+
+
+ --use-unsigned-payload
+ Whether to use an unsigned payload in PutObject
+
+ Rclone has to avoid the AWS SDK seeking the body when calling
+ PutObject. The AWS provider can add checksums in the trailer to avoid
+ seeking but other providers can't.
+
+ This should be true, false or left unset to use the default for the provider.
+
+
+ --use-presigned-request
+ Whether to use a presigned request or PutObject for single part uploads
+
+ If this is false rclone will use PutObject from the AWS SDK to upload
+ an object.
+
+ Versions of rclone < 1.59 use presigned requests to upload a single
+ part object and setting this flag to true will re-enable that
+ functionality. This shouldn't be necessary except in exceptional
+ circumstances or for testing.
+
+
+ --versions
+ Include old versions in directory listings.
+ + --version-at + Show file versions as they were at the specified time. + + The parameter should be a date, "2006-01-02", datetime "2006-01-02 + 15:04:05" or a duration for that long ago, eg "100d" or "1h". + + Note that when using this no file write operations are permitted, + so you can't upload files or delete them. + + See [the time option docs](/docs/#time-option) for valid formats. + + + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + + --decompress + If set this will decompress gzip encoded objects. + + It is possible to upload objects to S3 with "Content-Encoding: gzip" + set. Normally rclone will download these files as compressed objects. + + If this flag is set then rclone will decompress these files with + "Content-Encoding: gzip" as they are received. This means that rclone + can't check the size and hash but the file contents will be decompressed. + + + --might-gzip + Set this if the backend might gzip objects. + + Normally providers will not alter objects when they are downloaded. If + an object was not uploaded with `Content-Encoding: gzip` then it won't + be set on download. + + However some providers may gzip objects even if they weren't uploaded + with `Content-Encoding: gzip` (eg Cloudflare). + + A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + + If you set this flag and rclone downloads an object with + Content-Encoding: gzip set and chunked transfer encoding, then rclone + will decompress the object on the fly. + + If this is set to unset (the default) then rclone will choose + according to the provider setting what to apply, but you can override + rclone's choice here. + + + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + + --no-system-metadata + Suppress setting and reading of system metadata + + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. 
Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + + +OPTIONS: + --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] + --endpoint value Endpoint for Synology C2 Object Storage API. [$ENDPOINT] + --env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). (default: false) [$ENV_AUTH] + --help, -h show help + --location-constraint value Location constraint - must be set to match the Region. [$LOCATION_CONSTRAINT] + --region value Region where your data stored. [$REGION] + --secret-access-key value AWS Secret Access Key (password). [$SECRET_ACCESS_KEY] + + Advanced + + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. 
(default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. (default: false) [$VERSIONS] + + Client Config + + --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. + --client-cert value Path to Client SSL certificate (PEM) for mutual TLS auth. To remove, use empty string. + --client-connect-timeout value HTTP Client Connect timeout (default: 1m0s) + --client-expect-continue-timeout value Timeout when using expect / 100-continue in HTTP (default: 1s) + --client-header value [ --client-header value ] Set HTTP header for all transactions (i.e. key=value). This will replace the existing header values. To remove a header, use --http-header "key="". To remove all headers, use --http-header "" + --client-insecure-skip-verify Do not verify the server SSL certificate (insecure) (default: false) + --client-key value Path to Client SSL private key (PEM) for mutual TLS auth. To remove, use empty string. + --client-no-gzip Don't set Accept-Encoding: gzip (default: false) + --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) + --client-timeout value IO idle timeout (default: 5m0s) + --client-use-server-mod-time Use server modified time if possible (default: false) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) + + Retry Strategy + + --client-low-level-retries value Maximum number of retries for low-level client errors (default: 10) + --client-retry-backoff value The constant delay backoff for retrying IO read errors (default: 1s) + --client-retry-backoff-exp value The exponential delay backoff for retrying IO read errors (default: 1.0) + --client-retry-delay value The initial delay before retrying IO read errors (default: 1s) + --client-retry-max value Max number of retries for IO read errors (default: 10) + --client-skip-inaccessible Skip inaccessible files when opening (default: false) + +``` +{% endcode %} diff --git a/docs/en/cli-reference/storage/update/s3/tencentcos.md b/docs/en/cli-reference/storage/update/s3/tencentcos.md index 6a9183e0..23ea6e14 100644 --- a/docs/en/cli-reference/storage/update/s3/tencentcos.md +++ b/docs/en/cli-reference/storage/update/s3/tencentcos.md @@ -194,10 +194,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -214,6 +214,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -223,6 +227,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -307,13 +316,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -331,12 +337,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. 
+ + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -364,6 +387,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -398,9 +432,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -413,36 +520,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. 
(default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. 
(default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] Client Config @@ -457,7 +573,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/s3/wasabi.md b/docs/en/cli-reference/storage/update/s3/wasabi.md index ca4eba34..e5d0d4c0 100644 --- a/docs/en/cli-reference/storage/update/s3/wasabi.md +++ b/docs/en/cli-reference/storage/update/s3/wasabi.md @@ -193,10 +193,10 @@ DESCRIPTION: An AWS session token. --upload-concurrency - Concurrency for multipart uploads. + Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded - concurrently. + concurrently for multipart uploads and copies. If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing @@ -213,6 +213,10 @@ DESCRIPTION: Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. + + Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, + you'll need to set this to true. + --v2-auth If true use v2 authentication. @@ -222,6 +226,11 @@ DESCRIPTION: Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + --use-dual-stack + If true use AWS S3 dual-stack endpoint (IPv6 support). + + See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html) + --list-chunk Size of listing chunk (response list for each ListObject S3 request). @@ -306,13 +315,10 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. --memory-pool-flush-time - How often internal memory buffer pools will be flushed. - - Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. - This option controls how often unused buffers will be removed from the pool. + How often internal memory buffer pools will be flushed. (no longer used) --memory-pool-use-mmap - Whether to use mmap buffers in internal memory pool. + Whether to use mmap buffers in internal memory pool. (no longer used) --disable-http2 Disable usage of http2 for S3 backends. @@ -330,12 +336,29 @@ DESCRIPTION: This is usually set to a CloudFront CDN URL as AWS S3 offers cheaper egress for data downloaded through the CloudFront network. + --directory-markers + Upload an empty object with a trailing slash when a new directory is created + + Empty folders are unsupported for bucket based remotes, this option creates an empty + object ending with "/", to persist the folder. + + --use-multipart-etag Whether to use ETag in multipart uploads for verification This should be true, false or left unset to use the default for the provider. + --use-unsigned-payload + Whether to use an unsigned payload in PutObject + + Rclone has to avoid the AWS SDK seeking the body when calling + PutObject. The AWS provider can add checksums in the trailer to avoid + seeking but other providers can't. 
+ + This should be true, false or left unset to use the default for the provider. + + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -363,6 +386,17 @@ DESCRIPTION: See [the time option docs](/docs/#time-option) for valid formats. + --version-deleted + Show deleted file markers when using versions. + + This shows deleted file markers in the listing when using versions. These will appear + as 0 size files. The only operation which can be performed on them is deletion. + + Deleting a delete marker will reveal the previous version. + + Deleted files will always show with a timestamp. + + --decompress If set this will decompress gzip encoded objects. @@ -397,9 +431,82 @@ DESCRIPTION: rclone's choice here. + --use-accept-encoding-gzip + Whether to send `Accept-Encoding: gzip` header. + + By default, rclone will append `Accept-Encoding: gzip` to the request to download + compressed objects whenever possible. + + However some providers such as Google Cloud Storage may alter the HTTP headers, breaking + the signature of the request. + + A symptom of this would be receiving errors like + + SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. + + In this case, you might want to try disabling this option. + + --no-system-metadata Suppress setting and reading of system metadata + --use-already-exists + Set if rclone should report BucketAlreadyExists errors on bucket creation. + + At some point during the evolution of the s3 protocol, AWS started + returning an `AlreadyOwnedByYou` error when attempting to create a + bucket that the user already owned, rather than a + `BucketAlreadyExists` error. + + Unfortunately exactly what has been implemented by s3 clones is a + little inconsistent, some return `AlreadyOwnedByYou`, some return + `BucketAlreadyExists` and some return no error at all. + + This is important to rclone because it ensures the bucket exists by + creating it on quite a lot of operations (unless + `--s3-no-check-bucket` is used). + + If rclone knows the provider can return `AlreadyOwnedByYou` or returns + no error then it can report `BucketAlreadyExists` errors when the user + attempts to create a bucket not owned by them. Otherwise rclone + ignores the `BucketAlreadyExists` error which can lead to confusion. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --use-multipart-uploads + Set if rclone should use multipart uploads. + + You can change this if you want to disable the use of multipart uploads. + This shouldn't be necessary in normal operation. + + This should be automatically set correctly for all providers rclone + knows about - please make a bug report if not. + + + --sdk-log-mode + Set to debug the SDK + + This can be set to a comma separated list of the following functions: + + - `Signing` + - `Retries` + - `Request` + - `RequestWithBody` + - `Response` + - `ResponseWithBody` + - `DeprecatedUsage` + - `RequestEventMessage` + - `ResponseEventMessage` + + Use `Off` to disable and `All` to set all log levels. You will need to + use `-vv` to see the debug level logs. + + + --description + Description of the remote. + OPTIONS: --access-key-id value AWS Access Key ID. [$ACCESS_KEY_ID] @@ -413,36 +520,45 @@ OPTIONS: Advanced - --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] - --chunk-size value Chunk size to use for uploading. 
(default: "5Mi") [$CHUNK_SIZE] - --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] - --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] - --disable-checksum Don't store MD5 checksum with object metadata. (default: false) [$DISABLE_CHECKSUM] - --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] - --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] - --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] - --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] - --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] - --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] - --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] - --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] - --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (default: false) [$MEMORY_POOL_USE_MMAP] - --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] - --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] - --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] - --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] - --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] - --profile value Profile to use in the shared credentials file. [$PROFILE] - --session-token value An AWS session token. [$SESSION_TOKEN] - --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] - --upload-concurrency value Concurrency for multipart uploads. (default: 4) [$UPLOAD_CONCURRENCY] - --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] - --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] - --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] - --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] - --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] - --versions Include old versions in directory listings. (default: false) [$VERSIONS] + --bucket-acl value Canned ACL used when creating buckets. [$BUCKET_ACL] + --chunk-size value Chunk size to use for uploading. (default: "5Mi") [$CHUNK_SIZE] + --copy-cutoff value Cutoff for switching to multipart copy. (default: "4.656Gi") [$COPY_CUTOFF] + --decompress If set this will decompress gzip encoded objects. (default: false) [$DECOMPRESS] + --description value Description of the remote. [$DESCRIPTION] + --directory-markers Upload an empty object with a trailing slash when a new directory is created (default: false) [$DIRECTORY_MARKERS] + --disable-checksum Don't store MD5 checksum with object metadata. 
(default: false) [$DISABLE_CHECKSUM] + --disable-http2 Disable usage of http2 for S3 backends. (default: false) [$DISABLE_HTTP2] + --download-url value Custom endpoint for downloads. [$DOWNLOAD_URL] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8,Dot") [$ENCODING] + --force-path-style If true use path style access if false use virtual hosted style. (default: true) [$FORCE_PATH_STYLE] + --list-chunk value Size of listing chunk (response list for each ListObject S3 request). (default: 1000) [$LIST_CHUNK] + --list-url-encode value Whether to url encode listings: true/false/unset (default: "unset") [$LIST_URL_ENCODE] + --list-version value Version of ListObjects to use: 1,2 or 0 for auto. (default: 0) [$LIST_VERSION] + --max-upload-parts value Maximum number of parts in a multipart upload. (default: 10000) [$MAX_UPLOAD_PARTS] + --memory-pool-flush-time value How often internal memory buffer pools will be flushed. (no longer used) (default: "1m0s") [$MEMORY_POOL_FLUSH_TIME] + --memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. (no longer used) (default: false) [$MEMORY_POOL_USE_MMAP] + --might-gzip value Set this if the backend might gzip objects. (default: "unset") [$MIGHT_GZIP] + --no-check-bucket If set, don't attempt to check the bucket exists or create it. (default: false) [$NO_CHECK_BUCKET] + --no-head If set, don't HEAD uploaded objects to check integrity. (default: false) [$NO_HEAD] + --no-head-object If set, do not do HEAD before GET when getting objects. (default: false) [$NO_HEAD_OBJECT] + --no-system-metadata Suppress setting and reading of system metadata (default: false) [$NO_SYSTEM_METADATA] + --profile value Profile to use in the shared credentials file. [$PROFILE] + --sdk-log-mode value Set to debug the SDK (default: "Off") [$SDK_LOG_MODE] + --session-token value An AWS session token. [$SESSION_TOKEN] + --shared-credentials-file value Path to the shared credentials file. [$SHARED_CREDENTIALS_FILE] + --upload-concurrency value Concurrency for multipart uploads and copies. (default: 4) [$UPLOAD_CONCURRENCY] + --upload-cutoff value Cutoff for switching to chunked upload. (default: "200Mi") [$UPLOAD_CUTOFF] + --use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header. (default: "unset") [$USE_ACCEPT_ENCODING_GZIP] + --use-already-exists value Set if rclone should report BucketAlreadyExists errors on bucket creation. (default: "unset") [$USE_ALREADY_EXISTS] + --use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support). (default: false) [$USE_DUAL_STACK] + --use-multipart-etag value Whether to use ETag in multipart uploads for verification (default: "unset") [$USE_MULTIPART_ETAG] + --use-multipart-uploads value Set if rclone should use multipart uploads. (default: "unset") [$USE_MULTIPART_UPLOADS] + --use-presigned-request Whether to use a presigned request or PutObject for single part uploads (default: false) [$USE_PRESIGNED_REQUEST] + --use-unsigned-payload value Whether to use an unsigned payload in PutObject (default: "unset") [$USE_UNSIGNED_PAYLOAD] + --v2-auth If true use v2 authentication. (default: false) [$V2_AUTH] + --version-at value Show file versions as they were at the specified time. (default: "off") [$VERSION_AT] + --version-deleted Show deleted file markers when using versions. (default: false) [$VERSION_DELETED] + --versions Include old versions in directory listings. 
(default: false) [$VERSIONS] Client Config @@ -457,7 +573,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/seafile.md b/docs/en/cli-reference/storage/update/seafile.md index 8608ea8f..84ce28d8 100644 --- a/docs/en/cli-reference/storage/update/seafile.md +++ b/docs/en/cli-reference/storage/update/seafile.md @@ -45,6 +45,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --2fa Two-factor authentication ('true' if the account has 2FA enabled). (default: false) [$2FA] @@ -58,8 +61,9 @@ OPTIONS: Advanced - --create-library Should rclone create a library if it doesn't exist. (default: false) [$CREATE_LIBRARY] - --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8") [$ENCODING] + --create-library Should rclone create a library if it doesn't exist. (default: false) [$CREATE_LIBRARY] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8") [$ENCODING] Client Config @@ -74,7 +78,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/sftp.md b/docs/en/cli-reference/storage/update/sftp.md index c528ade6..46adbbb6 100644 --- a/docs/en/cli-reference/storage/update/sftp.md +++ b/docs/en/cli-reference/storage/update/sftp.md @@ -26,7 +26,15 @@ DESCRIPTION: --key-pem Raw PEM-encoded private key. - If specified, will override key_file parameter. + Note that this should be on a single line with line endings replaced with '\n', eg + + key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY----- + + This will generate the single line correctly: + + awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa + + If specified, it will override the key_file parameter. --key-file Path to PEM-encoded private key file. @@ -112,6 +120,18 @@ DESCRIPTION: E.g. if home directory can be found in a shared folder called "home": rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory + + To specify only the path to the SFTP remote's root, and allow rclone to add any relative subpaths automatically (including unwrapping/decrypting remotes as necessary), add the '@' character to the beginning of the path. + + E.g. 
the first example above could be rewritten as: + + rclone sync /home/local/directory remote:/directory --sftp-path-override @/volume2 + + Note that when using this method with Synology "home" folders, the full "/homes/USER" path should be specified instead of "/home". + + E.g. the second example above should be rewritten as: + + rclone sync /home/local/directory remote:/homes/USER/directory --sftp-path-override @/volume1 --set-modtime Set the modified time on the remote if set. @@ -147,6 +167,15 @@ DESCRIPTION: Specifies the path or command to run a sftp server on the remote host. The subsystem option is ignored when server_command is defined. + + If adding server_command to the configuration file please note that + it should not be enclosed in quotes, since that will make rclone fail. + + A working example is: + + [remote_name] + type = sftp + server_command = sudo /usr/libexec/openssh/sftp-server --use-fstat If set use fstat instead of stat. @@ -226,6 +255,23 @@ DESCRIPTION: cost of using more memory. + --connections + Maximum number of SFTP simultaneous connections, 0 for unlimited. + + Note that setting this is very likely to cause deadlocks so it should + be used with care. + + If you are doing a sync or copy then make sure connections is one more + than the sum of `--transfers` and `--checkers`. + + If you use `--check-first` then it just needs to be one more than the + maximum of `--checkers` and `--transfers`. + + So for `connections 3` you'd use `--checkers 2 --transfers 2 + --check-first` or `--checkers 1 --transfers 1`. + + + --set-env Environment variables to pass to sftp and commands @@ -239,7 +285,7 @@ DESCRIPTION: VAR1=value VAR2=value - and pass variables with spaces in in quotes, eg + and pass variables with spaces in quotes, eg "VAR3=value with space" "VAR4=value with space" VAR5=nospacehere @@ -279,6 +325,77 @@ DESCRIPTION: umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com + --host-key-algorithms + Space separated list of host key algorithms, ordered by preference. + + At least one must match with server configuration. This can be checked for example using ssh -Q HostKeyAlgorithms. + + Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled. + + Example: + + ssh-ed25519 ssh-rsa ssh-dss + + + --ssh + Path and arguments to external ssh binary. + + Normally rclone will use its internal ssh library to connect to the + SFTP server. However it does not implement all possible ssh options so + it may be desirable to use an external ssh binary. + + Rclone ignores all the internal config if you use this option and + expects you to configure the ssh binary with the user/host/port and + any other options you need. + + **Important** The ssh command must log in without asking for a + password so needs to be configured with keys or certificates. + + Rclone will run the command supplied either with the additional + arguments "-s sftp" to access the SFTP subsystem or with commands such + as "md5sum /path/to/file" appended to read checksums. + + Any arguments with spaces in should be surrounded by "double quotes". + + An example setting might be: + + ssh -o ServerAliveInterval=20 user@example.com + + Note that when using an external ssh binary rclone makes a new ssh + connection for every hash it calculates. + + + --socks-proxy + Socks 5 proxy host. + + Supports the format user:pass@host:port, user@host:port, host:port. 
+
+ Example:
+
+ myUser:myPass@localhost:9005
+
+
+ --copy-is-hardlink
+ Set to enable server side copies using hardlinks.
+
+ The SFTP protocol does not define a copy command so normally server
+ side copies are not allowed with the sftp backend.
+
+ However the SFTP protocol does support hardlinking, and if you enable
+ this flag then the sftp backend will support server side copies. These
+ will be implemented by doing a hardlink from the source to the
+ destination.
+
+ Not all sftp servers support this.
+
+ Note that hardlinking two files together will use no additional space
+ as the source and the destination will be the same file.
+
+ This feature may be useful for backups made with --copy-dest.
+
+ --description
+ Description of the remote.
+
 OPTIONS:
 --disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available. (default: false) [$DISABLE_HASHCHECK]
@@ -291,6 +408,7 @@ OPTIONS:
 --pass value SSH password, leave blank to use ssh-agent. [$PASS]
 --port value SSH port number. (default: 22) [$PORT]
 --pubkey-file value Optional path to public key file. [$PUBKEY_FILE]
+ --ssh value Path and arguments to external ssh binary. [$SSH]
 --use-insecure-cipher Enable the use of insecure ciphers and key exchange methods. (default: false) [$USE_INSECURE_CIPHER]
 --user value SSH username. (default: "$USER") [$USER]
@@ -300,8 +418,12 @@ OPTIONS:
 --chunk-size value Upload and download chunk size. (default: "32Ki") [$CHUNK_SIZE]
 --ciphers value Space separated list of ciphers to be used for session encryption, ordered by preference. [$CIPHERS]
 --concurrency value The maximum number of outstanding requests for one file (default: 64) [$CONCURRENCY]
+ --connections value Maximum number of SFTP simultaneous connections, 0 for unlimited. (default: 0) [$CONNECTIONS]
+ --copy-is-hardlink Set to enable server side copies using hardlinks. (default: false) [$COPY_IS_HARDLINK]
+ --description value Description of the remote. [$DESCRIPTION]
 --disable-concurrent-reads If set don't use concurrent reads. (default: false) [$DISABLE_CONCURRENT_READS]
 --disable-concurrent-writes If set don't use concurrent writes. (default: false) [$DISABLE_CONCURRENT_WRITES]
+ --host-key-algorithms value Space separated list of host key algorithms, ordered by preference. [$HOST_KEY_ALGORITHMS]
 --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT]
 --key-exchange value Space separated list of key exchange algorithms, ordered by preference. [$KEY_EXCHANGE]
 --known-hosts-file value Optional path to known_hosts file. [$KNOWN_HOSTS_FILE]
@@ -314,6 +436,7 @@ OPTIONS:
 --sha1sum-command value The command used to read sha1 hashes. [$SHA1SUM_COMMAND]
 --shell-type value The type of SSH shell on remote server, if any. [$SHELL_TYPE]
 --skip-links Set to skip any symlinks and any other non regular files. (default: false) [$SKIP_LINKS]
+ --socks-proxy value Socks 5 proxy host. [$SOCKS_PROXY]
 --subsystem value Specifies the SSH2 subsystem on the remote host. (default: "sftp") [$SUBSYSTEM]
 --use-fstat If set use fstat instead of stat. (default: false) [$USE_FSTAT]
@@ -330,7 +453,7 @@ OPTIONS:
 --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1)
 --client-timeout value IO idle timeout (default: 5m0s)
 --client-use-server-mod-time Use server modified time if possible (default: false)
- --client-user-agent value Set the user-agent to a specified string. To remove, use empty string.
(default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/sharefile.md b/docs/en/cli-reference/storage/update/sharefile.md index e21676fd..4b2738a1 100644 --- a/docs/en/cli-reference/storage/update/sharefile.md +++ b/docs/en/cli-reference/storage/update/sharefile.md @@ -9,6 +9,29 @@ USAGE: singularity storage update sharefile [command options] DESCRIPTION: + --client-id + OAuth Client Id. + + Leave blank normally. + + --client-secret + OAuth Client Secret. + + Leave blank normally. + + --token + OAuth Access Token as a JSON blob. + + --auth-url + Auth server URL. + + Leave blank to use the provider defaults. + + --token-url + Token server url. + + Leave blank to use the provider defaults. + --upload-cutoff Cutoff for switching to multipart upload. @@ -47,16 +70,25 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: + --client-id value OAuth Client Id. [$CLIENT_ID] + --client-secret value OAuth Client Secret. [$CLIENT_SECRET] --help, -h show help --root-folder-id value ID of the root folder. [$ROOT_FOLDER_ID] Advanced + --auth-url value Auth server URL. [$AUTH_URL] --chunk-size value Upload chunk size. (default: "64Mi") [$CHUNK_SIZE] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] --endpoint value Endpoint for API calls. [$ENDPOINT] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] --upload-cutoff value Cutoff for switching to multipart upload. (default: "128Mi") [$UPLOAD_CUTOFF] Client Config @@ -72,7 +104,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/sia.md b/docs/en/cli-reference/storage/update/sia.md index 06554bf7..7f94dfaf 100644 --- a/docs/en/cli-reference/storage/update/sia.md +++ b/docs/en/cli-reference/storage/update/sia.md @@ -30,6 +30,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --api-password value Sia Daemon API Password. [$API_PASSWORD] @@ -38,8 +41,9 @@ OPTIONS: Advanced - --encoding value The encoding for the backend. (default: "Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --user-agent value Siad User Agent (default: "Sia-Agent") [$USER_AGENT] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. 
(default: "Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --user-agent value Siad User Agent (default: "Sia-Agent") [$USER_AGENT] Client Config @@ -54,7 +58,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/smb.md b/docs/en/cli-reference/storage/update/smb.md index b56e08b8..978c72dc 100644 --- a/docs/en/cli-reference/storage/update/smb.md +++ b/docs/en/cli-reference/storage/update/smb.md @@ -59,6 +59,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --domain value Domain name for NTLM authentication. (default: "WORKGROUP") [$DOMAIN] @@ -72,6 +75,7 @@ OPTIONS: Advanced --case-insensitive Whether the server is configured to be case-insensitive. (default: true) [$CASE_INSENSITIVE] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot") [$ENCODING] --hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access. (default: true) [$HIDE_SPECIAL_SHARE] --idle-timeout value Max time before closing idle connections. (default: "1m0s") [$IDLE_TIMEOUT] @@ -89,7 +93,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/storj/existing.md b/docs/en/cli-reference/storage/update/storj/existing.md index 58c3b948..61a87f0a 100644 --- a/docs/en/cli-reference/storage/update/storj/existing.md +++ b/docs/en/cli-reference/storage/update/storj/existing.md @@ -12,11 +12,18 @@ DESCRIPTION: --access-grant Access grant. + --description + Description of the remote. + OPTIONS: --access-grant value Access grant. [$ACCESS_GRANT] --help, -h show help + Advanced + + --description value Description of the remote. [$DESCRIPTION] + Client Config --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. @@ -30,7 +37,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/storj/new.md b/docs/en/cli-reference/storage/update/storj/new.md index f617bdc7..f0e354a7 100644 --- a/docs/en/cli-reference/storage/update/storj/new.md +++ b/docs/en/cli-reference/storage/update/storj/new.md @@ -27,6 +27,9 @@ DESCRIPTION: To access existing objects enter passphrase used for uploading. + --description + Description of the remote. + OPTIONS: --api-key value API key. [$API_KEY] @@ -34,6 +37,10 @@ OPTIONS: --passphrase value Encryption passphrase. [$PASSPHRASE] --satellite-address value Satellite address. (default: "us1.storj.io") [$SATELLITE_ADDRESS] + Advanced + + --description value Description of the remote. [$DESCRIPTION] + Client Config --client-ca-cert value Path to CA certificate used to verify servers. To remove, use empty string. @@ -47,7 +54,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/sugarsync.md b/docs/en/cli-reference/storage/update/sugarsync.md index 0de3c766..db8c972c 100644 --- a/docs/en/cli-reference/storage/update/sugarsync.md +++ b/docs/en/cli-reference/storage/update/sugarsync.md @@ -63,6 +63,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --access-key-id value Sugarsync Access Key ID. [$ACCESS_KEY_ID] @@ -76,6 +79,7 @@ OPTIONS: --authorization value Sugarsync authorization. [$AUTHORIZATION] --authorization-expiry value Sugarsync authorization expiry. [$AUTHORIZATION_EXPIRY] --deleted-id value Sugarsync deleted folder id. [$DELETED_ID] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. (default: "Slash,Ctl,InvalidUtf8,Dot") [$ENCODING] --refresh-token value Sugarsync refresh token. [$REFRESH_TOKEN] --root-id value Sugarsync root id. [$ROOT_ID] @@ -94,7 +98,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. 
(default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/swift.md b/docs/en/cli-reference/storage/update/swift.md index 46fc558c..30610816 100644 --- a/docs/en/cli-reference/storage/update/swift.md +++ b/docs/en/cli-reference/storage/update/swift.md @@ -1,9 +1,9 @@ -# OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) +# OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) {% code fullWidth="true" %} ``` NAME: - singularity storage update swift - OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH) + singularity storage update swift - OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) USAGE: singularity storage update swift [command options] @@ -33,6 +33,7 @@ DESCRIPTION: | https://auth.storage.memset.com/v1.0 | Memset Memstore UK | https://auth.storage.memset.com/v2.0 | Memset Memstore UK v2 | https://auth.cloud.ovh.net/v3 | OVH + | https://authenticate.ain.net | Blomp Cloud Storage --user-id User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID). @@ -96,33 +97,67 @@ DESCRIPTION: | pcs | OVH Public Cloud Storage | pca | OVH Public Cloud Archive + --fetch-until-empty-page + When paginating, always fetch unless we received an empty page. + + Consider using this option if rclone listings show fewer objects + than expected, or if repeated syncs copy unchanged objects. + + It is safe to enable this, but rclone may make more API calls than + necessary. + + This is one of a pair of workarounds to handle implementations + of the Swift API that do not implement pagination as expected. See + also "partial_page_fetch_threshold". + + --partial-page-fetch-threshold + When paginating, fetch if the current page is within this percentage of the limit. + + Consider using this option if rclone listings show fewer objects + than expected, or if repeated syncs copy unchanged objects. + + It is safe to enable this, but rclone may make more API calls than + necessary. + + This is one of a pair of workarounds to handle implementations + of the Swift API that do not implement pagination as expected. See + also "fetch_until_empty_page". + --chunk-size - Above this size files will be chunked into a _segments container. + Above this size files will be chunked. + + Above this size files will be chunked into a a `_segments` container + or a `.file-segments` directory. (See the `use_segments_container` option + for more info). Default for this is 5 GiB which is its maximum value, which + means only files above this size will be chunked. + + Rclone uploads chunked files as dynamic large objects (DLO). - Above this size files will be chunked into a _segments container. The - default for this is 5 GiB which is its maximum value. --no-chunk Don't chunk files during streaming upload. - When doing streaming uploads (e.g. using rcat or mount) setting this - flag will cause the swift backend to not upload chunked files. + When doing streaming uploads (e.g. using `rcat` or `mount` with + `--vfs-cache-mode off`) setting this flag will cause the swift backend + to not upload chunked files. - This will limit the maximum upload size to 5 GiB. However non chunked - files are easier to deal with and have an MD5SUM. + This will limit the maximum streamed upload size to 5 GiB. This is + useful because non chunked files are easier to deal with and have an + MD5SUM. - Rclone will still chunk files bigger than chunk_size when doing normal - copy operations. 
+ Rclone will still chunk files bigger than `chunk_size` when doing + normal copy operations. --no-large-objects Disable support for static and dynamic large objects Swift cannot transparently store files bigger than 5 GiB. There are - two schemes for doing that, static or dynamic large objects, and the - API does not allow rclone to determine whether a file is a static or - dynamic large object without doing a HEAD on the object. Since these - need to be treated differently, this means rclone has to issue HEAD - requests for objects for example when reading checksums. + two schemes for chunking large files, static large objects (SLO) or + dynamic large objects (DLO), and the API does not allow rclone to + determine whether a file is a static or dynamic large object without + doing a HEAD on the object. Since these need to be treated + differently, this means rclone has to issue HEAD requests for objects + for example when reading checksums. When `no_large_objects` is set, rclone will assume that there are no static or dynamic large objects stored. This means it can stop doing @@ -133,16 +168,45 @@ DESCRIPTION: uploaded in chunks, so files bigger than 5 GiB will just fail on upload. - If you set this option and there *are* static or dynamic large objects, + If you set this option and there **are** static or dynamic large objects, then this will give incorrect hashes for them. Downloads will succeed, but other operations such as Remove and Copy will fail. + --use-segments-container + Choose destination for large object segments + + Swift cannot transparently store files bigger than 5 GiB and rclone + will chunk files larger than `chunk_size` (default 5 GiB) in order to + upload them. + + If this value is `true` the chunks will be stored in an additional + container named the same as the destination container but with + `_segments` appended. This means that there won't be any duplicated + data in the original container but having another container may not be + acceptable. + + If this value is `false` the chunks will be stored in a + `.file-segments` directory in the root of the container. This + directory will be omitted when listing the container. Some + providers (eg Blomp) require this mode as creating additional + containers isn't allowed. If it is desired to see the `.file-segments` + directory in the root then this flag must be set to `true`. + + If this value is `unset` (the default), then rclone will choose the value + to use. It will be `false` unless rclone detects any `auth_url`s that + it knows need it to be `true`. In this case you'll see a message in + the DEBUG log. + + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --application-credential-id value Application Credential ID (OS_APPLICATION_CREDENTIAL_ID). [$APPLICATION_CREDENTIAL_ID] @@ -167,11 +231,15 @@ OPTIONS: Advanced - --chunk-size value Above this size files will be chunked into a _segments container. (default: "5Gi") [$CHUNK_SIZE] - --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8") [$ENCODING] - --leave-parts-on-error If true avoid calling abort upload on a failure. (default: false) [$LEAVE_PARTS_ON_ERROR] - --no-chunk Don't chunk files during streaming upload. (default: false) [$NO_CHUNK] - --no-large-objects Disable support for static and dynamic large objects (default: false) [$NO_LARGE_OBJECTS] + --chunk-size value Above this size files will be chunked. 
(default: "5Gi") [$CHUNK_SIZE] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,InvalidUtf8") [$ENCODING] + --fetch-until-empty-page When paginating, always fetch unless we received an empty page. (default: false) [$FETCH_UNTIL_EMPTY_PAGE] + --leave-parts-on-error If true avoid calling abort upload on a failure. (default: false) [$LEAVE_PARTS_ON_ERROR] + --no-chunk Don't chunk files during streaming upload. (default: false) [$NO_CHUNK] + --no-large-objects Disable support for static and dynamic large objects (default: false) [$NO_LARGE_OBJECTS] + --partial-page-fetch-threshold value When paginating, fetch if the current page is within this percentage of the limit. (default: 0) [$PARTIAL_PAGE_FETCH_THRESHOLD] + --use-segments-container value Choose destination for large object segments (default: "unset") [$USE_SEGMENTS_CONTAINER] Client Config @@ -186,7 +254,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/union.md b/docs/en/cli-reference/storage/update/union.md index 65815b70..32b0360f 100644 --- a/docs/en/cli-reference/storage/update/union.md +++ b/docs/en/cli-reference/storage/update/union.md @@ -34,6 +34,9 @@ DESCRIPTION: If a remote has less than this much free space then it won't be considered for use in lfs or eplfs policies. + --description + Description of the remote. + OPTIONS: --action-policy value Policy to choose upstream on ACTION category. (default: "epall") [$ACTION_POLICY] @@ -45,6 +48,7 @@ OPTIONS: Advanced + --description value Description of the remote. [$DESCRIPTION] --min-free-space value Minimum viable free space for lfs/eplfs policies. (default: "1Gi") [$MIN_FREE_SPACE] Client Config @@ -60,7 +64,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/uptobox.md b/docs/en/cli-reference/storage/update/uptobox.md index 985fb77e..a1081f26 100644 --- a/docs/en/cli-reference/storage/update/uptobox.md +++ b/docs/en/cli-reference/storage/update/uptobox.md @@ -14,11 +14,17 @@ DESCRIPTION: Get it from https://uptobox.com/my_account. + --private + Set to make uploaded files private + --encoding The encoding for the backend. See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --access-token value Your access token. [$ACCESS_TOKEN] @@ -26,7 +32,9 @@ OPTIONS: Advanced - --encoding value The encoding for the backend. 
(default: "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot") [$ENCODING] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot") [$ENCODING] + --private Set to make uploaded files private (default: false) [$PRIVATE] Client Config @@ -41,7 +49,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/webdav.md b/docs/en/cli-reference/storage/update/webdav.md index 47da2b20..64a4df54 100644 --- a/docs/en/cli-reference/storage/update/webdav.md +++ b/docs/en/cli-reference/storage/update/webdav.md @@ -18,10 +18,12 @@ DESCRIPTION: Name of the WebDAV site/service/software you are using. Examples: + | fastmail | Fastmail Files | nextcloud | Nextcloud | owncloud | Owncloud | sharepoint | Sharepoint Online, authenticated by Microsoft account | sharepoint-ntlm | Sharepoint with NTLM authentication, usually self-hosted or on-premises + | rclone | rclone WebDAV server to serve a remote over HTTP via the WebDAV protocol | other | Other site/service or software --user @@ -58,6 +60,30 @@ DESCRIPTION: You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'. + --pacer-min-sleep + Minimum time to sleep between API calls. + + --nextcloud-chunk-size + Nextcloud upload chunk size. + + We recommend configuring your NextCloud instance to increase the max chunk size to 1 GB for better upload performances. + See https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side + + Set to 0 to disable chunked uploading. + + + --owncloud-exclude-shares + Exclude ownCloud shares + + --owncloud-exclude-mounts + Exclude ownCloud mounted storages + + --unix-socket + Path to a unix domain socket to dial to, instead of opening a TCP connection directly + + --description + Description of the remote. + OPTIONS: --bearer-token value Bearer token instead of user/pass (e.g. a Macaroon). [$BEARER_TOKEN] @@ -70,8 +96,14 @@ OPTIONS: Advanced --bearer-token-command value Command to run to get a bearer token. [$BEARER_TOKEN_COMMAND] + --description value Description of the remote. [$DESCRIPTION] --encoding value The encoding for the backend. [$ENCODING] --headers value Set HTTP headers for all transactions. [$HEADERS] + --nextcloud-chunk-size value Nextcloud upload chunk size. (default: "10Mi") [$NEXTCLOUD_CHUNK_SIZE] + --owncloud-exclude-mounts Exclude ownCloud mounted storages (default: false) [$OWNCLOUD_EXCLUDE_MOUNTS] + --owncloud-exclude-shares Exclude ownCloud shares (default: false) [$OWNCLOUD_EXCLUDE_SHARES] + --pacer-min-sleep value Minimum time to sleep between API calls. 
(default: "10ms") [$PACER_MIN_SLEEP] + --unix-socket value Path to a unix domain socket to dial to, instead of opening a TCP connection directly [$UNIX_SOCKET] Client Config @@ -86,7 +118,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/yandex.md b/docs/en/cli-reference/storage/update/yandex.md index 9c3b2428..2ed6937c 100644 --- a/docs/en/cli-reference/storage/update/yandex.md +++ b/docs/en/cli-reference/storage/update/yandex.md @@ -40,6 +40,12 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --spoof-ua + Set the user agent to match an official version of the yandex disk client. May help with upload performance. + + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -48,11 +54,13 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] - --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Slash,Del,Ctl,InvalidUtf8,Dot") [$ENCODING] + --hard-delete Delete files permanently rather than putting them into the trash. (default: false) [$HARD_DELETE] + --spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance. (default: true) [$SPOOF_UA] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config @@ -67,7 +75,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/cli-reference/storage/update/zoho.md b/docs/en/cli-reference/storage/update/zoho.md index 3511e363..a584d613 100644 --- a/docs/en/cli-reference/storage/update/zoho.md +++ b/docs/en/cli-reference/storage/update/zoho.md @@ -52,6 +52,9 @@ DESCRIPTION: See the [encoding section in the overview](/overview/#encoding) for more info. + --description + Description of the remote. + OPTIONS: --client-id value OAuth Client Id. [$CLIENT_ID] @@ -61,10 +64,11 @@ OPTIONS: Advanced - --auth-url value Auth server URL. [$AUTH_URL] - --encoding value The encoding for the backend. 
(default: "Del,Ctl,InvalidUtf8") [$ENCODING] - --token value OAuth Access Token as a JSON blob. [$TOKEN] - --token-url value Token server url. [$TOKEN_URL] + --auth-url value Auth server URL. [$AUTH_URL] + --description value Description of the remote. [$DESCRIPTION] + --encoding value The encoding for the backend. (default: "Del,Ctl,InvalidUtf8") [$ENCODING] + --token value OAuth Access Token as a JSON blob. [$TOKEN] + --token-url value Token server url. [$TOKEN_URL] Client Config @@ -79,7 +83,7 @@ OPTIONS: --client-scan-concurrency value Max number of concurrent listing requests when scanning data source (default: 1) --client-timeout value IO idle timeout (default: 5m0s) --client-use-server-mod-time Use server modified time if possible (default: false) - --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone/v1.62.2-DEV) + --client-user-agent value Set the user-agent to a specified string. To remove, use empty string. (default: rclone default) Retry Strategy diff --git a/docs/en/web-api-reference/piece.md b/docs/en/web-api-reference/piece.md index 7af6451e..db860c73 100644 --- a/docs/en/web-api-reference/piece.md +++ b/docs/en/web-api-reference/piece.md @@ -12,3 +12,7 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/preparation/{id}/piece/{piece_cid}" method="delete" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + diff --git a/docs/en/web-api-reference/storage.md b/docs/en/web-api-reference/storage.md index 43bb6f34..40dddcd6 100644 --- a/docs/en/web-api-reference/storage.md +++ b/docs/en/web-api-reference/storage.md @@ -4,10 +4,6 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} -{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/acd" method="post" %} -[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) -{% endswagger %} - {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/azureblob" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -120,6 +116,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/oos/workload_identity_auth" 
method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/opendrive" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -172,6 +172,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/gcs" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/huaweiobs" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -188,14 +192,26 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/leviia" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/liara" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/linode" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/lyvecloud" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger 
src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/magalu" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/minio" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -208,6 +224,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/petabox" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/qiniu" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -216,6 +236,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/rclone" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/scaleway" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} @@ -232,6 +256,10 @@ [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} +{% swagger src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/synology" method="post" %} +[https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) +{% endswagger %} + {% swagger 
src="https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml" path="/storage/s3/tencentcos" method="post" %} [https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml](https://raw.githubusercontent.com/data-preservation-programs/singularity/main/docs/swagger/swagger.yaml) {% endswagger %} diff --git a/docs/swagger/docs.go b/docs/swagger/docs.go index 424f209f..1e4ecba9 100644 --- a/docs/swagger/docs.go +++ b/docs/swagger/docs.go @@ -721,6 +721,70 @@ const docTemplate = `{ } } }, + "/preparation/{id}/piece/{piece_cid}": { + "delete": { + "description": "Deletes a piece (CAR) and its associated records. For data pieces, resets file ranges\nto allow re-packing. For DAG pieces, resets directory export flags for re-generation.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Piece" + ], + "summary": "Delete a piece from a preparation", + "operationId": "DeletePiece", + "parameters": [ + { + "type": "string", + "description": "Preparation ID or name", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Piece CID", + "name": "piece_cid", + "in": "path", + "required": true + }, + { + "description": "Delete options", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/dataprep.DeletePieceRequest" + } + } + ], + "responses": { + "204": { + "description": "No Content" + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, "/preparation/{id}/schedules": { "get": { "consumes": [ @@ -1899,52 +1963,6 @@ const docTemplate = `{ } } }, - "/storage/acd": { - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "Storage" - ], - "summary": "Create Acd storage", - "operationId": "CreateAcdStorage", - "parameters": [ - { - "description": "Request body", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/storage.createAcdStorageRequest" - } - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/model.Storage" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "$ref": "#/definitions/api.HTTPError" - } - }, - "500": { - "description": "Internal Server Error", - "schema": { - "$ref": "#/definitions/api.HTTPError" - } - } - } - } - }, "/storage/azureblob": { "post": { "consumes": [ @@ -3233,6 +3251,52 @@ const docTemplate = `{ } } }, + "/storage/oos/workload_identity_auth": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Oos storage with workload_identity_auth - use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM).", + "operationId": "CreateOosWorkload_identity_authStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createOosWorkload_identity_authStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", 
+ "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, "/storage/opendrive": { "post": { "consumes": [ @@ -3831,7 +3895,7 @@ const docTemplate = `{ } } }, - "/storage/s3/huaweiobs": { + "/storage/s3/gcs": { "post": { "consumes": [ "application/json" @@ -3842,8 +3906,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with HuaweiOBS - Huawei Object Storage Service", - "operationId": "CreateS3HuaweiOBSStorage", + "summary": "Create S3 storage with GCS - Google Cloud Storage", + "operationId": "CreateS3GCSStorage", "parameters": [ { "description": "Request body", @@ -3851,7 +3915,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3HuaweiOBSStorageRequest" + "$ref": "#/definitions/storage.createS3GCSStorageRequest" } } ], @@ -3877,7 +3941,7 @@ const docTemplate = `{ } } }, - "/storage/s3/ibmcos": { + "/storage/s3/huaweiobs": { "post": { "consumes": [ "application/json" @@ -3888,8 +3952,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with IBMCOS - IBM COS S3", - "operationId": "CreateS3IBMCOSStorage", + "summary": "Create S3 storage with HuaweiOBS - Huawei Object Storage Service", + "operationId": "CreateS3HuaweiOBSStorage", "parameters": [ { "description": "Request body", @@ -3897,7 +3961,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IBMCOSStorageRequest" + "$ref": "#/definitions/storage.createS3HuaweiOBSStorageRequest" } } ], @@ -3923,7 +3987,7 @@ const docTemplate = `{ } } }, - "/storage/s3/idrive": { + "/storage/s3/ibmcos": { "post": { "consumes": [ "application/json" @@ -3934,8 +3998,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with IDrive - IDrive e2", - "operationId": "CreateS3IDriveStorage", + "summary": "Create S3 storage with IBMCOS - IBM COS S3", + "operationId": "CreateS3IBMCOSStorage", "parameters": [ { "description": "Request body", @@ -3943,7 +4007,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IDriveStorageRequest" + "$ref": "#/definitions/storage.createS3IBMCOSStorageRequest" } } ], @@ -3969,7 +4033,7 @@ const docTemplate = `{ } } }, - "/storage/s3/ionos": { + "/storage/s3/idrive": { "post": { "consumes": [ "application/json" @@ -3980,8 +4044,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with IONOS - IONOS Cloud", - "operationId": "CreateS3IONOSStorage", + "summary": "Create S3 storage with IDrive - IDrive e2", + "operationId": "CreateS3IDriveStorage", "parameters": [ { "description": "Request body", @@ -3989,7 +4053,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IONOSStorageRequest" + "$ref": "#/definitions/storage.createS3IDriveStorageRequest" } } ], @@ -4015,7 +4079,7 @@ const docTemplate = `{ } } }, - "/storage/s3/liara": { + "/storage/s3/ionos": { "post": { "consumes": [ "application/json" @@ -4026,8 +4090,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Liara - Liara Object Storage", - "operationId": "CreateS3LiaraStorage", + "summary": "Create S3 storage with IONOS - IONOS Cloud", + "operationId": 
"CreateS3IONOSStorage", "parameters": [ { "description": "Request body", @@ -4035,7 +4099,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LiaraStorageRequest" + "$ref": "#/definitions/storage.createS3IONOSStorageRequest" } } ], @@ -4061,7 +4125,7 @@ const docTemplate = `{ } } }, - "/storage/s3/lyvecloud": { + "/storage/s3/leviia": { "post": { "consumes": [ "application/json" @@ -4072,8 +4136,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with LyveCloud - Seagate Lyve Cloud", - "operationId": "CreateS3LyveCloudStorage", + "summary": "Create S3 storage with Leviia - Leviia Object Storage", + "operationId": "CreateS3LeviiaStorage", "parameters": [ { "description": "Request body", @@ -4081,7 +4145,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LyveCloudStorageRequest" + "$ref": "#/definitions/storage.createS3LeviiaStorageRequest" } } ], @@ -4107,7 +4171,7 @@ const docTemplate = `{ } } }, - "/storage/s3/minio": { + "/storage/s3/liara": { "post": { "consumes": [ "application/json" @@ -4118,8 +4182,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Minio - Minio Object Storage", - "operationId": "CreateS3MinioStorage", + "summary": "Create S3 storage with Liara - Liara Object Storage", + "operationId": "CreateS3LiaraStorage", "parameters": [ { "description": "Request body", @@ -4127,7 +4191,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3MinioStorageRequest" + "$ref": "#/definitions/storage.createS3LiaraStorageRequest" } } ], @@ -4153,7 +4217,7 @@ const docTemplate = `{ } } }, - "/storage/s3/netease": { + "/storage/s3/linode": { "post": { "consumes": [ "application/json" @@ -4164,8 +4228,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Netease - Netease Object Storage (NOS)", - "operationId": "CreateS3NeteaseStorage", + "summary": "Create S3 storage with Linode - Linode Object Storage", + "operationId": "CreateS3LinodeStorage", "parameters": [ { "description": "Request body", @@ -4173,7 +4237,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3NeteaseStorageRequest" + "$ref": "#/definitions/storage.createS3LinodeStorageRequest" } } ], @@ -4199,7 +4263,7 @@ const docTemplate = `{ } } }, - "/storage/s3/other": { + "/storage/s3/lyvecloud": { "post": { "consumes": [ "application/json" @@ -4210,8 +4274,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Other - Any other S3 compatible provider", - "operationId": "CreateS3OtherStorage", + "summary": "Create S3 storage with LyveCloud - Seagate Lyve Cloud", + "operationId": "CreateS3LyveCloudStorage", "parameters": [ { "description": "Request body", @@ -4219,7 +4283,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3OtherStorageRequest" + "$ref": "#/definitions/storage.createS3LyveCloudStorageRequest" } } ], @@ -4245,7 +4309,7 @@ const docTemplate = `{ } } }, - "/storage/s3/qiniu": { + "/storage/s3/magalu": { "post": { "consumes": [ "application/json" @@ -4256,8 +4320,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Qiniu - Qiniu Object Storage (Kodo)", - "operationId": "CreateS3QiniuStorage", + "summary": "Create S3 storage with Magalu - Magalu Object 
Storage", + "operationId": "CreateS3MagaluStorage", "parameters": [ { "description": "Request body", @@ -4265,7 +4329,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3QiniuStorageRequest" + "$ref": "#/definitions/storage.createS3MagaluStorageRequest" } } ], @@ -4291,7 +4355,7 @@ const docTemplate = `{ } } }, - "/storage/s3/rackcorp": { + "/storage/s3/minio": { "post": { "consumes": [ "application/json" @@ -4302,8 +4366,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with RackCorp - RackCorp Object Storage", - "operationId": "CreateS3RackCorpStorage", + "summary": "Create S3 storage with Minio - Minio Object Storage", + "operationId": "CreateS3MinioStorage", "parameters": [ { "description": "Request body", @@ -4311,7 +4375,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3RackCorpStorageRequest" + "$ref": "#/definitions/storage.createS3MinioStorageRequest" } } ], @@ -4337,7 +4401,7 @@ const docTemplate = `{ } } }, - "/storage/s3/scaleway": { + "/storage/s3/netease": { "post": { "consumes": [ "application/json" @@ -4348,8 +4412,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Scaleway - Scaleway Object Storage", - "operationId": "CreateS3ScalewayStorage", + "summary": "Create S3 storage with Netease - Netease Object Storage (NOS)", + "operationId": "CreateS3NeteaseStorage", "parameters": [ { "description": "Request body", @@ -4357,7 +4421,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3ScalewayStorageRequest" + "$ref": "#/definitions/storage.createS3NeteaseStorageRequest" } } ], @@ -4383,7 +4447,7 @@ const docTemplate = `{ } } }, - "/storage/s3/seaweedfs": { + "/storage/s3/other": { "post": { "consumes": [ "application/json" @@ -4394,8 +4458,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with SeaweedFS - SeaweedFS S3", - "operationId": "CreateS3SeaweedFSStorage", + "summary": "Create S3 storage with Other - Any other S3 compatible provider", + "operationId": "CreateS3OtherStorage", "parameters": [ { "description": "Request body", @@ -4403,7 +4467,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3SeaweedFSStorageRequest" + "$ref": "#/definitions/storage.createS3OtherStorageRequest" } } ], @@ -4429,7 +4493,7 @@ const docTemplate = `{ } } }, - "/storage/s3/stackpath": { + "/storage/s3/petabox": { "post": { "consumes": [ "application/json" @@ -4440,8 +4504,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with StackPath - StackPath Object Storage", - "operationId": "CreateS3StackPathStorage", + "summary": "Create S3 storage with Petabox - Petabox Object Storage", + "operationId": "CreateS3PetaboxStorage", "parameters": [ { "description": "Request body", @@ -4449,7 +4513,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3StackPathStorageRequest" + "$ref": "#/definitions/storage.createS3PetaboxStorageRequest" } } ], @@ -4475,7 +4539,7 @@ const docTemplate = `{ } } }, - "/storage/s3/storj": { + "/storage/s3/qiniu": { "post": { "consumes": [ "application/json" @@ -4486,8 +4550,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Storj - Storj (S3 Compatible Gateway)", - "operationId": "CreateS3StorjStorage", + 
"summary": "Create S3 storage with Qiniu - Qiniu Object Storage (Kodo)", + "operationId": "CreateS3QiniuStorage", "parameters": [ { "description": "Request body", @@ -4495,7 +4559,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3StorjStorageRequest" + "$ref": "#/definitions/storage.createS3QiniuStorageRequest" } } ], @@ -4521,7 +4585,7 @@ const docTemplate = `{ } } }, - "/storage/s3/tencentcos": { + "/storage/s3/rackcorp": { "post": { "consumes": [ "application/json" @@ -4532,8 +4596,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with TencentCOS - Tencent Cloud Object Storage (COS)", - "operationId": "CreateS3TencentCOSStorage", + "summary": "Create S3 storage with RackCorp - RackCorp Object Storage", + "operationId": "CreateS3RackCorpStorage", "parameters": [ { "description": "Request body", @@ -4541,7 +4605,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3TencentCOSStorageRequest" + "$ref": "#/definitions/storage.createS3RackCorpStorageRequest" } } ], @@ -4567,7 +4631,7 @@ const docTemplate = `{ } } }, - "/storage/s3/wasabi": { + "/storage/s3/rclone": { "post": { "consumes": [ "application/json" @@ -4578,8 +4642,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create S3 storage with Wasabi - Wasabi Object Storage", - "operationId": "CreateS3WasabiStorage", + "summary": "Create S3 storage with Rclone - Rclone S3 Server", + "operationId": "CreateS3RcloneStorage", "parameters": [ { "description": "Request body", @@ -4587,7 +4651,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3WasabiStorageRequest" + "$ref": "#/definitions/storage.createS3RcloneStorageRequest" } } ], @@ -4613,7 +4677,7 @@ const docTemplate = `{ } } }, - "/storage/seafile": { + "/storage/s3/scaleway": { "post": { "consumes": [ "application/json" @@ -4624,8 +4688,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Seafile storage", - "operationId": "CreateSeafileStorage", + "summary": "Create S3 storage with Scaleway - Scaleway Object Storage", + "operationId": "CreateS3ScalewayStorage", "parameters": [ { "description": "Request body", @@ -4633,7 +4697,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSeafileStorageRequest" + "$ref": "#/definitions/storage.createS3ScalewayStorageRequest" } } ], @@ -4659,7 +4723,7 @@ const docTemplate = `{ } } }, - "/storage/sftp": { + "/storage/s3/seaweedfs": { "post": { "consumes": [ "application/json" @@ -4670,8 +4734,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Sftp storage", - "operationId": "CreateSftpStorage", + "summary": "Create S3 storage with SeaweedFS - SeaweedFS S3", + "operationId": "CreateS3SeaweedFSStorage", "parameters": [ { "description": "Request body", @@ -4679,7 +4743,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSftpStorageRequest" + "$ref": "#/definitions/storage.createS3SeaweedFSStorageRequest" } } ], @@ -4705,7 +4769,7 @@ const docTemplate = `{ } } }, - "/storage/sharefile": { + "/storage/s3/stackpath": { "post": { "consumes": [ "application/json" @@ -4716,8 +4780,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Sharefile storage", - "operationId": "CreateSharefileStorage", + "summary": "Create S3 storage with StackPath - StackPath 
Object Storage", + "operationId": "CreateS3StackPathStorage", "parameters": [ { "description": "Request body", @@ -4725,7 +4789,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSharefileStorageRequest" + "$ref": "#/definitions/storage.createS3StackPathStorageRequest" } } ], @@ -4751,7 +4815,7 @@ const docTemplate = `{ } } }, - "/storage/sia": { + "/storage/s3/storj": { "post": { "consumes": [ "application/json" @@ -4762,8 +4826,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Sia storage", - "operationId": "CreateSiaStorage", + "summary": "Create S3 storage with Storj - Storj (S3 Compatible Gateway)", + "operationId": "CreateS3StorjStorage", "parameters": [ { "description": "Request body", @@ -4771,7 +4835,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSiaStorageRequest" + "$ref": "#/definitions/storage.createS3StorjStorageRequest" } } ], @@ -4797,7 +4861,7 @@ const docTemplate = `{ } } }, - "/storage/smb": { + "/storage/s3/synology": { "post": { "consumes": [ "application/json" @@ -4808,8 +4872,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Smb storage", - "operationId": "CreateSmbStorage", + "summary": "Create S3 storage with Synology - Synology C2 Object Storage", + "operationId": "CreateS3SynologyStorage", "parameters": [ { "description": "Request body", @@ -4817,7 +4881,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSmbStorageRequest" + "$ref": "#/definitions/storage.createS3SynologyStorageRequest" } } ], @@ -4843,7 +4907,7 @@ const docTemplate = `{ } } }, - "/storage/storj/existing": { + "/storage/s3/tencentcos": { "post": { "consumes": [ "application/json" @@ -4854,8 +4918,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Storj storage with existing - Use an existing access grant.", - "operationId": "CreateStorjExistingStorage", + "summary": "Create S3 storage with TencentCOS - Tencent Cloud Object Storage (COS)", + "operationId": "CreateS3TencentCOSStorage", "parameters": [ { "description": "Request body", @@ -4863,7 +4927,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createStorjExistingStorageRequest" + "$ref": "#/definitions/storage.createS3TencentCOSStorageRequest" } } ], @@ -4889,7 +4953,7 @@ const docTemplate = `{ } } }, - "/storage/storj/new": { + "/storage/s3/wasabi": { "post": { "consumes": [ "application/json" @@ -4900,8 +4964,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Storj storage with new - Create a new access grant from satellite address, API key, and passphrase.", - "operationId": "CreateStorjNewStorage", + "summary": "Create S3 storage with Wasabi - Wasabi Object Storage", + "operationId": "CreateS3WasabiStorage", "parameters": [ { "description": "Request body", @@ -4909,7 +4973,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createStorjNewStorageRequest" + "$ref": "#/definitions/storage.createS3WasabiStorageRequest" } } ], @@ -4935,7 +4999,7 @@ const docTemplate = `{ } } }, - "/storage/sugarsync": { + "/storage/seafile": { "post": { "consumes": [ "application/json" @@ -4946,8 +5010,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Sugarsync storage", - "operationId": "CreateSugarsyncStorage", + "summary": "Create Seafile storage", + "operationId": 
"CreateSeafileStorage", "parameters": [ { "description": "Request body", @@ -4955,7 +5019,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSugarsyncStorageRequest" + "$ref": "#/definitions/storage.createSeafileStorageRequest" } } ], @@ -4981,7 +5045,7 @@ const docTemplate = `{ } } }, - "/storage/swift": { + "/storage/sftp": { "post": { "consumes": [ "application/json" @@ -4992,8 +5056,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Swift storage", - "operationId": "CreateSwiftStorage", + "summary": "Create Sftp storage", + "operationId": "CreateSftpStorage", "parameters": [ { "description": "Request body", @@ -5001,7 +5065,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSwiftStorageRequest" + "$ref": "#/definitions/storage.createSftpStorageRequest" } } ], @@ -5027,7 +5091,7 @@ const docTemplate = `{ } } }, - "/storage/union": { + "/storage/sharefile": { "post": { "consumes": [ "application/json" @@ -5038,8 +5102,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Union storage", - "operationId": "CreateUnionStorage", + "summary": "Create Sharefile storage", + "operationId": "CreateSharefileStorage", "parameters": [ { "description": "Request body", @@ -5047,7 +5111,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createUnionStorageRequest" + "$ref": "#/definitions/storage.createSharefileStorageRequest" } } ], @@ -5073,7 +5137,7 @@ const docTemplate = `{ } } }, - "/storage/uptobox": { + "/storage/sia": { "post": { "consumes": [ "application/json" @@ -5084,8 +5148,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Uptobox storage", - "operationId": "CreateUptoboxStorage", + "summary": "Create Sia storage", + "operationId": "CreateSiaStorage", "parameters": [ { "description": "Request body", @@ -5093,7 +5157,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createUptoboxStorageRequest" + "$ref": "#/definitions/storage.createSiaStorageRequest" } } ], @@ -5119,7 +5183,7 @@ const docTemplate = `{ } } }, - "/storage/webdav": { + "/storage/smb": { "post": { "consumes": [ "application/json" @@ -5130,8 +5194,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Webdav storage", - "operationId": "CreateWebdavStorage", + "summary": "Create Smb storage", + "operationId": "CreateSmbStorage", "parameters": [ { "description": "Request body", @@ -5139,7 +5203,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createWebdavStorageRequest" + "$ref": "#/definitions/storage.createSmbStorageRequest" } } ], @@ -5165,7 +5229,7 @@ const docTemplate = `{ } } }, - "/storage/yandex": { + "/storage/storj/existing": { "post": { "consumes": [ "application/json" @@ -5176,8 +5240,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Yandex storage", - "operationId": "CreateYandexStorage", + "summary": "Create Storj storage with existing - Use an existing access grant.", + "operationId": "CreateStorjExistingStorage", "parameters": [ { "description": "Request body", @@ -5185,7 +5249,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createYandexStorageRequest" + "$ref": "#/definitions/storage.createStorjExistingStorageRequest" } } ], @@ -5211,7 +5275,7 @@ const docTemplate = `{ } } }, - 
"/storage/zoho": { + "/storage/storj/new": { "post": { "consumes": [ "application/json" @@ -5222,8 +5286,8 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Create Zoho storage", - "operationId": "CreateZohoStorage", + "summary": "Create Storj storage with new - Create a new access grant from satellite address, API key, and passphrase.", + "operationId": "CreateStorjNewStorage", "parameters": [ { "description": "Request body", @@ -5231,7 +5295,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createZohoStorageRequest" + "$ref": "#/definitions/storage.createStorjNewStorageRequest" } } ], @@ -5257,25 +5321,36 @@ const docTemplate = `{ } } }, - "/storage/{name}": { - "delete": { + "/storage/sugarsync": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], "tags": [ "Storage" ], - "summary": "Remove a storage", - "operationId": "RemoveStorage", + "summary": "Create Sugarsync storage", + "operationId": "CreateSugarsyncStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSugarsyncStorageRequest" + } } ], "responses": { - "204": { - "description": "No Content" + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } }, "400": { "description": "Bad Request", @@ -5290,8 +5365,10 @@ const docTemplate = `{ } } } - }, - "patch": { + } + }, + "/storage/swift": { + "post": { "consumes": [ "application/json" ], @@ -5301,26 +5378,16 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Update a storage connection", - "operationId": "UpdateStorage", + "summary": "Create Swift storage", + "operationId": "CreateSwiftStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true - }, - { - "description": "Configuration", - "name": "config", + "description": "Request body", + "name": "request", "in": "body", "required": true, "schema": { - "type": "object", - "additionalProperties": { - "type": "string" - } + "$ref": "#/definitions/storage.createSwiftStorageRequest" } } ], @@ -5346,8 +5413,8 @@ const docTemplate = `{ } } }, - "/storage/{name}/explore/{path}": { - "get": { + "/storage/union": { + "post": { "consumes": [ "application/json" ], @@ -5357,32 +5424,24 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Explore directory entries in a storage system", - "operationId": "ExploreStorage", + "summary": "Create Union storage", + "operationId": "CreateUnionStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "Path in the storage system to explore", - "name": "path", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createUnionStorageRequest" + } } ], "responses": { "200": { "description": "OK", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/storage.DirEntry" - } + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5400,8 +5459,8 @@ const docTemplate = `{ } } }, - "/storage/{name}/rename": { - "patch": { + "/storage/uptobox": { + "post": { "consumes": [ 
"application/json" ], @@ -5411,23 +5470,16 @@ const docTemplate = `{ "tags": [ "Storage" ], - "summary": "Rename a storage connection", - "operationId": "RenameStorage", + "summary": "Create Uptobox storage", + "operationId": "CreateUptoboxStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true - }, - { - "description": "New storage name", + "description": "Request body", "name": "request", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.RenameRequest" + "$ref": "#/definitions/storage.createUptoboxStorageRequest" } } ], @@ -5453,24 +5505,35 @@ const docTemplate = `{ } } }, - "/wallet": { - "get": { + "/storage/webdav": { + "post": { + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "Wallet" + "Storage" + ], + "summary": "Create Webdav storage", + "operationId": "CreateWebdavStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createWebdavStorageRequest" + } + } ], - "summary": "List all imported wallets", - "operationId": "ListWallets", "responses": { "200": { "description": "OK", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Wallet" - } + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5486,7 +5549,9 @@ const docTemplate = `{ } } } - }, + } + }, + "/storage/yandex": { "post": { "consumes": [ "application/json" @@ -5495,10 +5560,10 @@ const docTemplate = `{ "application/json" ], "tags": [ - "Wallet" + "Storage" ], - "summary": "Import a private key", - "operationId": "ImportWallet", + "summary": "Create Yandex storage", + "operationId": "CreateYandexStorage", "parameters": [ { "description": "Request body", @@ -5506,7 +5571,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/wallet.ImportRequest" + "$ref": "#/definitions/storage.createYandexStorageRequest" } } ], @@ -5514,7 +5579,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/model.Wallet" + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5532,18 +5597,64 @@ const docTemplate = `{ } } }, - "/wallet/{address}": { + "/storage/zoho": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Zoho storage", + "operationId": "CreateZohoStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createZohoStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/{name}": { "delete": { "tags": [ - "Wallet" + "Storage" ], - "summary": "Remove a wallet", - "operationId": "RemoveWallet", + "summary": "Remove a storage", + "operationId": "RemoveStorage", "parameters": [ { "type": "string", - "description": "Address", - "name": "address", + "description": "Storage ID or name", + "name": "name", "in": "path", "required": true } @@ -5565,57 +5676,332 @@ const docTemplate = `{ } } } - } - } - }, - "definitions": { - 
"admin.SetIdentityRequest": { - "type": "object", - "properties": { - "identity": { - "type": "string" + }, + "patch": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Update a storage connection", + "operationId": "UpdateStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "description": "Configuration", + "name": "config", + "in": "body", + "required": true, + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } } } }, - "api.HTTPError": { - "type": "object", - "properties": { - "err": { - "type": "string" + "/storage/{name}/explore/{path}": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Explore directory entries in a storage system", + "operationId": "ExploreStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Path in the storage system to explore", + "name": "path", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/storage.DirEntry" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } } } }, - "dataprep.AddPieceRequest": { - "type": "object", - "required": [ - "pieceCid", - "pieceSize" - ], - "properties": { - "fileSize": { - "description": "File size of the CAR file, this is required for boost online deal", - "type": "integer" - }, - "pieceCid": { - "description": "CID of the piece", - "type": "string" - }, - "pieceSize": { - "description": "Size of the piece", - "type": "string" - }, - "rootCid": { - "description": "Root CID of the CAR file, used to populate the label field of storage deal", - "type": "string" + "/storage/{name}/rename": { + "patch": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Rename a storage connection", + "operationId": "RenameStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "description": "New storage name", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.RenameRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } } } }, - "dataprep.CreateRequest": { - "type": "object", - "required": [ - "name" - ], - "properties": { + "/wallet": { + "get": { + "produces": 
[ + "application/json" + ], + "tags": [ + "Wallet" + ], + "summary": "List all imported wallets", + "operationId": "ListWallets", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Wallet" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + }, + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Wallet" + ], + "summary": "Import a private key", + "operationId": "ImportWallet", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/wallet.ImportRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Wallet" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/wallet/{address}": { + "delete": { + "tags": [ + "Wallet" + ], + "summary": "Remove a wallet", + "operationId": "RemoveWallet", + "parameters": [ + { + "type": "string", + "description": "Address", + "name": "address", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + } + }, + "definitions": { + "admin.SetIdentityRequest": { + "type": "object", + "properties": { + "identity": { + "type": "string" + } + } + }, + "api.HTTPError": { + "type": "object", + "properties": { + "err": { + "type": "string" + } + } + }, + "dataprep.AddPieceRequest": { + "type": "object", + "required": [ + "pieceCid", + "pieceSize" + ], + "properties": { + "fileSize": { + "description": "File size of the CAR file, this is required for boost online deal", + "type": "integer" + }, + "pieceCid": { + "description": "CID of the piece", + "type": "string" + }, + "pieceSize": { + "description": "Size of the piece", + "type": "string" + }, + "rootCid": { + "description": "Root CID of the CAR file, used to populate the label field of storage deal", + "type": "string" + } + } + }, + "dataprep.CreateRequest": { + "type": "object", + "required": [ + "name" + ], + "properties": { "deleteAfterExport": { "description": "Whether to delete the source files after export", "type": "boolean", @@ -5665,6 +6051,19 @@ const docTemplate = `{ } } }, + "dataprep.DeletePieceRequest": { + "type": "object", + "properties": { + "deleteCar": { + "description": "Delete the physical CAR file from storage (default: true)", + "type": "boolean" + }, + "force": { + "description": "Delete even if deals reference this piece", + "type": "boolean" + } + } + }, "dataprep.DirEntry": { "type": "object", "properties": { @@ -5949,6 +6348,10 @@ const docTemplate = `{ "jobId": { "type": "integer" }, + "minPieceSizePadding": { + "description": "MinPieceSizePadding tracks virtual padding for inline mode only. Inline: stores padding amount, PieceReader serves zeros virtually. 
Non-inline: always 0, literal zeros are written to CAR file for Curio TreeD compatibility.", + "type": "integer" + }, "numOfFiles": { "type": "integer" }, @@ -5963,7 +6366,7 @@ const docTemplate = `{ "type": "string" }, "preparationId": { - "description": "Association", + "description": "Association - SET NULL for fast prep deletion, async cleanup", "type": "integer" }, "rootCid": { @@ -6166,7 +6569,7 @@ const docTemplate = `{ "type": "object", "properties": { "attachmentId": { - "description": "Associations", + "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", "type": "integer" }, "cid": { @@ -6251,7 +6654,7 @@ const docTemplate = `{ "$ref": "#/definitions/model.JobType" }, "workerId": { - "description": "Associations", + "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", "type": "string" } } @@ -6766,55 +7169,11 @@ const docTemplate = `{ } } }, - "storage.acdConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", - "type": "string" - }, - "checkpoint": { - "description": "Checkpoint for internal polling (debug).", - "type": "string" - }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" - }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" - }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "templinkThreshold": { - "description": "Files \u003e= this size will be downloaded via their tempLink.", - "type": "string", - "default": "9Gi" - }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" - }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" - }, - "uploadWaitPerGb": { - "description": "Additional time per GiB to wait after a failed complete upload to see if it appears.", - "type": "string", - "default": "3m0s" - } - } - }, "storage.azureblobConfig": { "type": "object", "properties": { "accessTier": { - "description": "Access tier of blob: hot, cool or archive.", + "description": "Access tier of blob: hot, cool, cold or archive.", "type": "string" }, "account": { @@ -6852,6 +7211,20 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "deleteSnapshots": { + "description": "Set to specify how to deal with snapshots on blob deletion.", + "type": "string", + "example": "" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -6881,12 +7254,12 @@ const docTemplate = `{ "default": 5000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -6975,13 +7348,17 @@ const docTemplate = `{ "type": "string", "default": "4Gi" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "disableChecksum": { "description": "Disable checksums for large (\u003e upload cutoff) files.", "type": "boolean", "default": false }, "downloadAuthDuration": { - "description": "Time before the authorization token will expire in s or suffix ms|s|m|h|d.", + "description": "Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.", "type": "string", "default": "1w" }, @@ -7007,13 +7384,18 @@ const docTemplate = `{ "description": "Application Key.", "type": "string" }, + "lifecycle": { + "description": "Set the number of days deleted files should be kept when creating a bucket.", + "type": "integer", + "default": 0 + }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -7021,6 +7403,11 @@ const docTemplate = `{ "description": "A flag string for X-Bz-Test-Mode header for debugging.", "type": "string" }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 4 + }, "uploadCutoff": { "description": "Cutoff for switching to chunked upload.", "type": "string", @@ -7071,11 +7458,19 @@ const docTemplate = `{ "type": "integer", "default": 100 }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", "default": "Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot" }, + "impersonate": { + "description": "Impersonate this user ID when using a service account.", + "type": "string" + }, "listChunk": { "description": "Size of listing chunk 1-1000.", "type": "integer", @@ -7105,7 +7500,7 @@ const docTemplate = `{ } } }, - "storage.createAcdStorageRequest": { + "storage.createAzureblobStorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7120,7 +7515,7 @@ const docTemplate = `{ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.acdConfig" + "$ref": "#/definitions/storage.azureblobConfig" } ] }, @@ -7135,7 +7530,7 @@ const docTemplate = `{ } } }, - "storage.createAzureblobStorageRequest": { + "storage.createB2StorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7150,7 +7545,7 @@ const docTemplate = `{ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.azureblobConfig" + "$ref": "#/definitions/storage.b2Config" } ] }, @@ -7165,7 +7560,7 @@ const docTemplate = `{ } } }, - "storage.createB2StorageRequest": { + "storage.createBoxStorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7180,7 +7575,7 @@ const docTemplate = `{ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.b2Config" + "$ref": "#/definitions/storage.boxConfig" } ] }, @@ -7195,7 +7590,7 @@ const docTemplate = `{ } } }, - "storage.createBoxStorageRequest": { + "storage.createDriveStorageRequest": { "type": "object", "properties": { "clientConfig": { 
@@ -7210,7 +7605,7 @@ const docTemplate = `{ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.boxConfig" + "$ref": "#/definitions/storage.driveConfig" } ] }, @@ -7225,7 +7620,7 @@ const docTemplate = `{ } } }, - "storage.createDriveStorageRequest": { + "storage.createDropboxStorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7240,7 +7635,7 @@ const docTemplate = `{ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.driveConfig" + "$ref": "#/definitions/storage.dropboxConfig" } ] }, @@ -7255,7 +7650,7 @@ const docTemplate = `{ } } }, - "storage.createDropboxStorageRequest": { + "storage.createFichierStorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7270,37 +7665,7 @@ const docTemplate = `{ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.dropboxConfig" - } - ] - }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" - }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createFichierStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] - }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.fichierConfig" + "$ref": "#/definitions/storage.fichierConfig" } ] }, @@ -7975,6 +8340,36 @@ const docTemplate = `{ } } }, + "storage.createOosWorkload_identity_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosWorkload_identity_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createOpendriveStorageRequest": { "type": "object", "properties": { @@ -8365,6 +8760,36 @@ const docTemplate = `{ } } }, + "storage.createS3GCSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3GCSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3HuaweiOBSStorageRequest": { "type": "object", "properties": { @@ -8485,6 +8910,36 @@ const docTemplate = `{ } } }, + "storage.createS3LeviiaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LeviiaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of 
the storage", + "type": "string" + } + } + }, "storage.createS3LiaraStorageRequest": { "type": "object", "properties": { @@ -8515,6 +8970,36 @@ const docTemplate = `{ } } }, + "storage.createS3LinodeStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LinodeConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3LyveCloudStorageRequest": { "type": "object", "properties": { @@ -8545,6 +9030,36 @@ const docTemplate = `{ } } }, + "storage.createS3MagaluStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3MagaluConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3MinioStorageRequest": { "type": "object", "properties": { @@ -8635,6 +9150,36 @@ const docTemplate = `{ } } }, + "storage.createS3PetaboxStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3PetaboxConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3QiniuStorageRequest": { "type": "object", "properties": { @@ -8695,6 +9240,36 @@ const docTemplate = `{ } } }, + "storage.createS3RcloneStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3RcloneConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3ScalewayStorageRequest": { "type": "object", "properties": { @@ -8815,6 +9390,36 @@ const docTemplate = `{ } } }, + "storage.createS3SynologyStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3SynologyConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, 
"storage.createS3TencentCOSStorageRequest": { "type": "object", "properties": { @@ -9318,222 +9923,2088 @@ const docTemplate = `{ "type": "boolean", "default": false }, - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "chunkSize": { + "description": "Upload chunk size.", + "type": "string", + "default": "8Mi" + }, + "clientId": { + "description": "Google Application Client Id", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "copyShortcutContent": { + "description": "Server side copy contents of shortcuts instead of the shortcut.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableHttp2": { + "description": "Disable drive using http2.", + "type": "boolean", + "default": true + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "InvalidUtf8" + }, + "envAuth": { + "description": "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "exportFormats": { + "description": "Comma separated list of preferred formats for downloading Google docs.", + "type": "string", + "default": "docx,xlsx,pptx,svg" + }, + "fastListBugFix": { + "description": "Work around a bug in Google Drive listing.", + "type": "boolean", + "default": true + }, + "formats": { + "description": "Deprecated: See export_formats.", + "type": "string" + }, + "impersonate": { + "description": "Impersonate this user when using a service account.", + "type": "string" + }, + "importFormats": { + "description": "Comma separated list of preferred formats for uploading Google docs.", + "type": "string" + }, + "keepRevisionForever": { + "description": "Keep new head revision of each file forever.", + "type": "boolean", + "default": false + }, + "listChunk": { + "description": "Size of listing chunk 100-1000, 0 to disable.", + "type": "integer", + "default": 1000 + }, + "metadataLabels": { + "description": "Control whether labels should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "metadataOwner": { + "description": "Control whether owner should be read or written in metadata.", + "type": "string", + "default": "read", + "example": "off" + }, + "metadataPermissions": { + "description": "Control whether permissions should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "pacerBurst": { + "description": "Number of API calls to allow without sleeping.", + "type": "integer", + "default": 100 + }, + "pacerMinSleep": { + "description": "Minimum time to sleep between API calls.", + "type": "string", + "default": "100ms" + }, + "resourceKey": { + "description": "Resource key for accessing a link-shared file.", + "type": "string" + }, + "rootFolderId": { + "description": "ID of the root folder.", + "type": "string" + }, + "scope": { + "description": "Comma separated list of scopes that rclone should use when requesting access from drive.", + "type": "string", + "example": "drive" + }, + "serverSideAcrossConfigs": { + "description": "Deprecated: use --server-side-across-configs instead.", + "type": "boolean", + "default": false + }, + "serviceAccountCredentials": { + "description": "Service Account Credentials JSON blob.", + "type": 
"string" + }, + "serviceAccountFile": { + "description": "Service Account Credentials JSON file path.", + "type": "string" + }, + "sharedWithMe": { + "description": "Only show files that are shared with me.", + "type": "boolean", + "default": false + }, + "showAllGdocs": { + "description": "Show all Google Docs including non-exportable ones in listings.", + "type": "boolean", + "default": false + }, + "sizeAsQuota": { + "description": "Show sizes as storage quota usage, not actual size.", + "type": "boolean", + "default": false + }, + "skipChecksumGphotos": { + "description": "Skip checksums on Google photos and videos only.", + "type": "boolean", + "default": false + }, + "skipDanglingShortcuts": { + "description": "If set skip dangling shortcut files.", + "type": "boolean", + "default": false + }, + "skipGdocs": { + "description": "Skip google documents in all listings.", + "type": "boolean", + "default": false + }, + "skipShortcuts": { + "description": "If set skip shortcut files.", + "type": "boolean", + "default": false + }, + "starredOnly": { + "description": "Only show files that are starred.", + "type": "boolean", + "default": false + }, + "stopOnDownloadLimit": { + "description": "Make download limit errors be fatal.", + "type": "boolean", + "default": false + }, + "stopOnUploadLimit": { + "description": "Make upload limit errors be fatal.", + "type": "boolean", + "default": false + }, + "teamDrive": { + "description": "ID of the Shared Drive (Team Drive).", + "type": "string" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "trashedOnly": { + "description": "Only show files that are in the trash.", + "type": "boolean", + "default": false + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "8Mi" + }, + "useCreatedDate": { + "description": "Use file created date instead of modified date.", + "type": "boolean", + "default": false + }, + "useSharedDate": { + "description": "Use date file was shared instead of modified date.", + "type": "boolean", + "default": false + }, + "useTrash": { + "description": "Send files to the trash instead of deleting permanently.", + "type": "boolean", + "default": true + }, + "v2DownloadMinSize": { + "description": "If Object's are greater, use drive v2 API to download.", + "type": "string", + "default": "off" + } + } + }, + "storage.dropboxConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "batchCommitTimeout": { + "description": "Max time to wait for a batch to finish committing", + "type": "string", + "default": "10m0s" + }, + "batchMode": { + "description": "Upload file batching sync|async|off.", + "type": "string", + "default": "sync" + }, + "batchSize": { + "description": "Max number of files in upload batch.", + "type": "integer", + "default": 0 + }, + "batchTimeout": { + "description": "Max time to allow an idle upload batch before uploading.", + "type": "string", + "default": "0s" + }, + "chunkSize": { + "description": "Upload chunk size (\u003c 150Mi).", + "type": "string", + "default": "48Mi" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": 
"The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot" + }, + "impersonate": { + "description": "Impersonate this user when using a business account.", + "type": "string" + }, + "pacerMinSleep": { + "description": "Minimum time to sleep between API calls.", + "type": "string", + "default": "10ms" + }, + "rootNamespace": { + "description": "Specify a different Dropbox namespace ID to use as the root for all paths.", + "type": "string" + }, + "sharedFiles": { + "description": "Instructs rclone to work on individual shared files.", + "type": "boolean", + "default": false + }, + "sharedFolders": { + "description": "Instructs rclone to work on shared folders.", + "type": "boolean", + "default": false + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + } + } + }, + "storage.fichierConfig": { + "type": "object", + "properties": { + "apiKey": { + "description": "Your API Key, get it from https://1fichier.com/console/params.pl.", + "type": "string" + }, + "cdn": { + "description": "Set if you wish to use CDN download links.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot" + }, + "filePassword": { + "description": "If you want to download a shared file that is password protected, add this parameter.", + "type": "string" + }, + "folderPassword": { + "description": "If you want to list the files in a shared folder that is password protected, add this parameter.", + "type": "string" + }, + "sharedFolder": { + "description": "If you want to download a shared folder, add this parameter.", + "type": "string" + } + } + }, + "storage.filefabricConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Del,Ctl,InvalidUtf8,Dot" + }, + "permanentToken": { + "description": "Permanent Authentication Token.", + "type": "string" + }, + "rootFolderId": { + "description": "ID of the root folder.", + "type": "string" + }, + "token": { + "description": "Session Token.", + "type": "string" + }, + "tokenExpiry": { + "description": "Token expiry time.", + "type": "string" + }, + "url": { + "description": "URL of the Enterprise File Fabric to connect to.", + "type": "string", + "example": "https://storagemadeeasy.com" + }, + "version": { + "description": "Version read from the file fabric.", + "type": "string" + } + } + }, + "storage.ftpConfig": { + "type": "object", + "properties": { + "askPassword": { + "description": "Allow asking for FTP password when needed.", + "type": "boolean", + "default": false + }, + "closeTimeout": { + "description": "Maximum time to wait for a response to close.", + "type": "string", + "default": "1m0s" + }, + "concurrency": { + "description": "Maximum number of FTP simultaneous connections, 0 for unlimited.", + "type": "integer", + "default": 0 + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableEpsv": { + "description": "Disable using EPSV even if server advertises support.", + "type": 
"boolean", + "default": false + }, + "disableMlsd": { + "description": "Disable using MLSD even if server advertises support.", + "type": "boolean", + "default": false + }, + "disableTls13": { + "description": "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", + "type": "boolean", + "default": false + }, + "disableUtf8": { + "description": "Disable using UTF-8 even if server advertises support.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Del,Ctl,RightSpace,Dot", + "example": "Asterisk,Ctl,Dot,Slash" + }, + "explicitTls": { + "description": "Use Explicit FTPS (FTP over TLS).", + "type": "boolean", + "default": false + }, + "forceListHidden": { + "description": "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.", + "type": "boolean", + "default": false + }, + "host": { + "description": "FTP host to connect to.", + "type": "string" + }, + "idleTimeout": { + "description": "Max time before closing idle connections.", + "type": "string", + "default": "1m0s" + }, + "noCheckCertificate": { + "description": "Do not verify the TLS certificate of the server.", + "type": "boolean", + "default": false + }, + "pass": { + "description": "FTP password.", + "type": "string" + }, + "port": { + "description": "FTP port number.", + "type": "integer", + "default": 21 + }, + "shutTimeout": { + "description": "Maximum time to wait for data connection closing status.", + "type": "string", + "default": "1m0s" + }, + "socksProxy": { + "description": "Socks 5 proxy host.", + "type": "string" + }, + "tls": { + "description": "Use Implicit FTPS (FTP over TLS).", + "type": "boolean", + "default": false + }, + "tlsCacheSize": { + "description": "Size of TLS session cache for all control and data connections.", + "type": "integer", + "default": 32 + }, + "user": { + "description": "FTP username.", + "type": "string", + "default": "$USER" + }, + "writingMdtm": { + "description": "Use MDTM to set modification time (VsFtpd quirk)", + "type": "boolean", + "default": false + } + } + }, + "storage.gcsConfig": { + "type": "object", + "properties": { + "anonymous": { + "description": "Access public buckets and objects without credentials.", + "type": "boolean", + "default": false + }, + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "bucketAcl": { + "description": "Access Control List for new buckets.", + "type": "string", + "example": "authenticatedRead" + }, + "bucketPolicyOnly": { + "description": "Access checks should use bucket-level IAM policies.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,CrLf,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for the service.", + "type": "string" + }, + "envAuth": { + "description": "Get GCP IAM credentials from runtime (environment 
variables or instance meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "location": { + "description": "Location for the newly created buckets.", + "type": "string", + "example": "" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "objectAcl": { + "description": "Access Control List for new objects.", + "type": "string", + "example": "authenticatedRead" + }, + "projectNumber": { + "description": "Project number.", + "type": "string" + }, + "serviceAccountCredentials": { + "description": "Service Account Credentials JSON blob.", + "type": "string" + }, + "serviceAccountFile": { + "description": "Service Account Credentials JSON file path.", + "type": "string" + }, + "storageClass": { + "description": "The storage class to use when storing objects in Google Cloud Storage.", + "type": "string", + "example": "" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "userProject": { + "description": "User project.", + "type": "string" + } + } + }, + "storage.gphotosConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "batchCommitTimeout": { + "description": "Max time to wait for a batch to finish committing", + "type": "string", + "default": "10m0s" + }, + "batchMode": { + "description": "Upload file batching sync|async|off.", + "type": "string", + "default": "sync" + }, + "batchSize": { + "description": "Max number of files in upload batch.", + "type": "integer", + "default": 0 + }, + "batchTimeout": { + "description": "Max time to allow an idle upload batch before uploading.", + "type": "string", + "default": "0s" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,CrLf,InvalidUtf8,Dot" + }, + "includeArchived": { + "description": "Also view and download archived media.", + "type": "boolean", + "default": false + }, + "readOnly": { + "description": "Set to make the Google Photos backend read only.", + "type": "boolean", + "default": false + }, + "readSize": { + "description": "Set to read the size of media items.", + "type": "boolean", + "default": false + }, + "startYear": { + "description": "Year limits the photos to be downloaded to those which are uploaded after the given year.", + "type": "integer", + "default": 2000 + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + } + } + }, + "storage.hdfsConfig": { + "type": "object", + "properties": { + "dataTransferProtection": { + "description": "Kerberos data transfer protection: authentication|integrity|privacy.", + "type": "string", + "example": "privacy" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Colon,Del,Ctl,InvalidUtf8,Dot" + }, + "namenode": { + "description": "Hadoop name nodes and ports.", + "type": "string" + }, + 
"servicePrincipalName": { + "description": "Kerberos service principal name for the namenode.", + "type": "string" + }, + "username": { + "description": "Hadoop user name.", + "type": "string", + "example": "root" + } + } + }, + "storage.hidriveConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "chunkSize": { + "description": "Chunksize for chunked uploads.", + "type": "string", + "default": "48Mi" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableFetchingMemberCount": { + "description": "Do not fetch number of objects in directories unless it is absolutely necessary.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Dot" + }, + "endpoint": { + "description": "Endpoint for the service.", + "type": "string", + "default": "https://api.hidrive.strato.com/2.1" + }, + "rootPrefix": { + "description": "The root/parent folder for all paths.", + "type": "string", + "default": "/", + "example": "/" + }, + "scopeAccess": { + "description": "Access permissions that rclone should use when requesting access from HiDrive.", + "type": "string", + "default": "rw", + "example": "rw" + }, + "scopeRole": { + "description": "User-level that rclone should use when requesting access from HiDrive.", + "type": "string", + "default": "user", + "example": "user" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "uploadConcurrency": { + "description": "Concurrency for chunked uploads.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff/Threshold for chunked uploads.", + "type": "string", + "default": "96Mi" + } + } + }, + "storage.httpConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "headers": { + "description": "Set HTTP headers for all transactions.", + "type": "string" + }, + "noEscape": { + "description": "Do not escape URL metacharacters in path names.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "Don't use HEAD requests.", + "type": "boolean", + "default": false + }, + "noSlash": { + "description": "Set this if the site doesn't end directories with /.", + "type": "boolean", + "default": false + }, + "url": { + "description": "URL of HTTP host to connect to.", + "type": "string" + } + } + }, + "storage.internetarchiveConfig": { + "type": "object", + "properties": { + "accessKeyId": { + "description": "IAS3 Access Key.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't ask the server to test against MD5 checksum calculated by rclone.", + "type": "boolean", + "default": true + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "IAS3 Endpoint.", + "type": "string", + "default": "https://s3.us.archive.org" + }, + "frontEndpoint": { + "description": "Host of InternetArchive Frontend.", + "type": "string", + 
"default": "https://archive.org" + }, + "secretAccessKey": { + "description": "IAS3 Secret Key (password).", + "type": "string" + }, + "waitArchive": { + "description": "Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish.", + "type": "string", + "default": "0s" + } + } + }, + "storage.jottacloudConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot" + }, + "hardDelete": { + "description": "Delete files permanently rather than putting them into the trash.", + "type": "boolean", + "default": false + }, + "md5MemoryLimit": { + "description": "Files bigger than this will be cached on disk to calculate the MD5 if required.", + "type": "string", + "default": "10Mi" + }, + "noVersions": { + "description": "Avoid server side versioning by deleting files and recreating files instead of overwriting them.", + "type": "boolean", + "default": false + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "trashedOnly": { + "description": "Only show files that are in the trash.", + "type": "boolean", + "default": false + }, + "uploadResumeLimit": { + "description": "Files bigger than this can be resumed if the upload fail's.", + "type": "string", + "default": "10Mi" + } + } + }, + "storage.koofrDigistorageConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + }, + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" + }, + "password": { + "description": "Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password.", + "type": "string" + }, + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true + }, + "user": { + "description": "Your user name.", + "type": "string" + } + } + }, + "storage.koofrKoofrConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + }, + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" + }, + "password": { + "description": "Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password.", + "type": "string" + }, + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true + }, + "user": { + "description": "Your user name.", + "type": "string" + } + } + }, + "storage.koofrOtherConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": 
{ + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "The Koofr API endpoint to use.", + "type": "string" + }, + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" + }, + "password": { + "description": "Your password for rclone (generate one at your service's settings page).", + "type": "string" + }, + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true + }, + "user": { + "description": "Your user name.", + "type": "string" + } + } + }, + "storage.localConfig": { + "type": "object", + "properties": { + "caseInsensitive": { + "description": "Force the filesystem to report itself as case insensitive.", + "type": "boolean", + "default": false + }, + "caseSensitive": { + "description": "Force the filesystem to report itself as case sensitive.", + "type": "boolean", + "default": false + }, + "copyLinks": { + "description": "Follow symlinks and copy the pointed to item.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Dot" + }, + "links": { + "description": "Translate symlinks to/from regular files with a '.rclonelink' extension.", + "type": "boolean", + "default": false + }, + "noCheckUpdated": { + "description": "Don't check to see if the files change during upload.", + "type": "boolean", + "default": false + }, + "noClone": { + "description": "Disable reflink cloning for server-side copies.", + "type": "boolean", + "default": false + }, + "noPreallocate": { + "description": "Disable preallocation of disk space for transferred files.", + "type": "boolean", + "default": false + }, + "noSetModtime": { + "description": "Disable setting modtime.", + "type": "boolean", + "default": false + }, + "noSparse": { + "description": "Disable sparse files for multi-thread downloads.", + "type": "boolean", + "default": false + }, + "nounc": { + "description": "Disable UNC (long path names) conversion on Windows.", + "type": "boolean", + "default": false, + "example": true + }, + "oneFileSystem": { + "description": "Don't cross filesystem boundaries (unix/macOS only).", + "type": "boolean", + "default": false + }, + "skipLinks": { + "description": "Don't warn about skipped symlinks.", + "type": "boolean", + "default": false + }, + "timeType": { + "description": "Set what kind of time is returned.", + "type": "string", + "default": "mtime", + "example": "mtime" + }, + "unicodeNormalization": { + "description": "Apply unicode NFC normalization to paths and filenames.", + "type": "boolean", + "default": false + }, + "zeroSizeLinks": { + "description": "Assume the Stat size of links is zero (and read them instead) (deprecated).", + "type": "boolean", + "default": false + } + } + }, + "storage.mailruConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "checkHash": { + "description": "What should copy do if file checksum is mismatched or invalid.", + "type": "boolean", + "default": true, + "example": true + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + 
"type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot" + }, + "pass": { + "description": "Password.", + "type": "string" + }, + "quirks": { + "description": "Comma separated list of internal maintenance flags.", + "type": "string" + }, + "speedupEnable": { + "description": "Skip full upload if there is another file with same data hash.", + "type": "boolean", + "default": true, + "example": true + }, + "speedupFilePatterns": { + "description": "Comma separated list of file name patterns eligible for speedup (put by hash).", + "type": "string", + "default": "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf", + "example": "" + }, + "speedupMaxDisk": { + "description": "This option allows you to disable speedup (put by hash) for large files.", + "type": "string", + "default": "3Gi", + "example": "0" + }, + "speedupMaxMemory": { + "description": "Files larger than the size given below will always be hashed on disk.", + "type": "string", + "default": "32Mi", + "example": "0" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "user": { + "description": "User name (usually email).", + "type": "string" + }, + "userAgent": { + "description": "HTTP user agent used internally by client.", + "type": "string" + } + } + }, + "storage.megaConfig": { + "type": "object", + "properties": { + "debug": { + "description": "Output more debug from Mega.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "hardDelete": { + "description": "Delete files permanently rather than putting them into the trash.", + "type": "boolean", + "default": false + }, + "pass": { + "description": "Password.", + "type": "string" + }, + "useHttps": { + "description": "Use HTTPS for transfers.", + "type": "boolean", + "default": false + }, + "user": { + "description": "User name.", + "type": "string" + } + } + }, + "storage.netstorageConfig": { + "type": "object", + "properties": { + "account": { + "description": "Set the NetStorage account name", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "host": { + "description": "Domain+path of NetStorage host to connect to.", + "type": "string" + }, + "protocol": { + "description": "Select between HTTP or HTTPS protocol.", + "type": "string", + "default": "https", + "example": "http" + }, + "secret": { + "description": "Set the NetStorage account secret/G2O key for authentication.", + "type": "string" + } + } + }, + "storage.onedriveConfig": { + "type": "object", + "properties": { + "accessScopes": { + "description": "Set scopes to be requested by rclone.", + "type": "string", + "default": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access", + "example": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" + }, + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "avOverride": { + "description": "Allows download of files the server thinks has a virus.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": 
"Chunk size to upload files with - must be multiple of 320k (327,680 bytes).", + "type": "string", + "default": "10Mi" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "delta": { + "description": "If set rclone will use delta listing to implement recursive listings.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableSitePermission": { + "description": "Disable the request for Sites.Read.All permission.", + "type": "boolean", + "default": false + }, + "driveId": { + "description": "The ID of the drive to use.", + "type": "string" + }, + "driveType": { + "description": "The type of the drive (personal | business | documentLibrary).", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot" + }, + "exposeOnenoteFiles": { + "description": "Set to make OneNote files show up in directory listings.", + "type": "boolean", + "default": false + }, + "hardDelete": { + "description": "Permanently delete files on removal.", + "type": "boolean", + "default": false + }, + "hashType": { + "description": "Specify the hash in use for the backend.", + "type": "string", + "default": "auto", + "example": "auto" + }, + "linkPassword": { + "description": "Set the password for links created by the link command.", + "type": "string" + }, + "linkScope": { + "description": "Set the scope of the links created by the link command.", + "type": "string", + "default": "anonymous", + "example": "anonymous" + }, + "linkType": { + "description": "Set the type of the links created by the link command.", + "type": "string", + "default": "view", + "example": "view" + }, + "listChunk": { + "description": "Size of listing chunk.", + "type": "integer", + "default": 1000 + }, + "metadataPermissions": { + "description": "Control whether permissions should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "noVersions": { + "description": "Remove all versions on modifying operations.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Choose national cloud region for OneDrive.", + "type": "string", + "default": "global", + "example": "global" + }, + "rootFolderId": { + "description": "ID of the root folder.", + "type": "string" + }, + "serverSideAcrossConfigs": { + "description": "Deprecated: use --server-side-across-configs instead.", + "type": "boolean", + "default": false + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + } + } + }, + "storage.oosEnv_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Object storage compartment OCID", + "type": "string" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "copyTimeout": { + 
"description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", + "type": "string" + }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" + }, + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" + }, + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosInstance_principal_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Object storage compartment OCID", + "type": "string" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", + "type": "string" + }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" + }, + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" + }, + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosNo_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", + "type": "string" + }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" + }, + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" + }, + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosResource_principal_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Object storage compartment OCID", + "type": "string" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", + "type": "string" + }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" + }, + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" + }, + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosUser_principal_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Object storage compartment OCID", + "type": "string" + }, + "configFile": { + "description": "Path to OCI config file", + "type": "string", + "default": "~/.oci/config", + "example": "~/.oci/config" + }, + "configProfile": { + "description": "Profile name inside the oci config file", + "type": "string", + "default": "Default", + "example": "Default" }, - "chunkSize": { - "description": "Upload chunk size.", + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", "type": "string", - "default": "8Mi" + "default": "4.656Gi" }, - "clientId": { - "description": "Google Application Client Id", - "type": "string" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "clientSecret": { - "description": "OAuth Client Secret.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "copyShortcutContent": { - "description": "Server side copy contents of shortcuts instead of the shortcut.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "disableHttp2": { - "description": "Disable drive using http2.", - "type": "boolean", - "default": true - }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "InvalidUtf8" - }, - "exportFormats": { - "description": "Comma separated list of preferred formats for downloading Google docs.", - "type": "string", - "default": "docx,xlsx,pptx,svg" + "default": "Slash,InvalidUtf8,Dot" }, - "formats": { - "description": "Deprecated: See export_formats.", + "endpoint": { + "description": "Endpoint for Object storage API.", "type": "string" }, - "impersonate": { - "description": "Impersonate this user when using a service account.", - "type": "string" + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false }, - "importFormats": { - "description": "Comma separated list of preferred formats for uploading Google docs.", + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", "type": "string" }, - "keepRevisionForever": { - "description": "Keep new head revision of each file forever.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "listChunk": { - "description": "Size of listing chunk 100-1000, 0 to disable.", - "type": "integer", - "default": 1000 + "region": { + "description": 
"Object storage Region", + "type": "string" }, - "pacerBurst": { - "description": "Number of API calls to allow without sleeping.", - "type": "integer", - "default": 100 + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "pacerMinSleep": { - "description": "Minimum time to sleep between API calls.", + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", "type": "string", - "default": "100ms" + "example": "" }, - "resourceKey": { - "description": "Resource key for accessing a link-shared file.", - "type": "string" + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "rootFolderId": { - "description": "ID of the root folder.", - "type": "string" + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" }, - "scope": { - "description": "Scope that rclone should use when requesting access from drive.", + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", "type": "string", - "example": "drive" + "example": "" }, - "serverSideAcrossConfigs": { - "description": "Allow server-side operations (e.g. copy) to work across different drive configs.", + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosWorkload_identity_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", "type": "boolean", "default": false }, - "serviceAccountCredentials": { - "description": "Service Account Credentials JSON blob.", - "type": "string" + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "serviceAccountFile": { - "description": "Service Account Credentials JSON file path.", + "compartment": { + "description": "Object storage compartment OCID", "type": "string" }, - "sharedWithMe": { - "description": "Only show files that are shared with me.", - "type": "boolean", - "default": false + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "sizeAsQuota": { - "description": "Show sizes as storage quota usage, not actual size.", - "type": "boolean", - "default": false + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "skipChecksumGphotos": { - "description": "Skip MD5 checksum on Google photos and videos only.", - "type": "boolean", - "default": false + "description": { + "description": "Description of the remote.", + "type": "string" }, - "skipDanglingShortcuts": { - "description": "If set skip dangling shortcut files.", + 
"disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "skipGdocs": { - "description": "Skip google documents in all listings.", - "type": "boolean", - "default": false + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "skipShortcuts": { - "description": "If set skip shortcut files.", - "type": "boolean", - "default": false + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" }, - "starredOnly": { - "description": "Only show files that are starred.", + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", "type": "boolean", "default": false }, - "stopOnDownloadLimit": { - "description": "Make download limit errors be fatal.", - "type": "boolean", - "default": false + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "stopOnUploadLimit": { - "description": "Make upload limit errors be fatal.", + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "teamDrive": { - "description": "ID of the Shared Drive (Team Drive).", + "region": { + "description": "Object storage Region", "type": "string" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "trashedOnly": { - "description": "Only show files that are in the trash.", - "type": "boolean", - "default": false + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", "type": "string", - "default": "8Mi" + "example": "" }, - "useCreatedDate": { - "description": "Use file created date instead of modified date.", - "type": "boolean", - "default": false + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" }, - "useSharedDate": { - "description": "Use date file was shared instead of modified date.", - "type": "boolean", - "default": false + "storageTier": { + "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.opendriveConfig": { + "type": "object", + "properties": { + "chunkSize": { + "description": "Files will be uploaded in chunks this size.", + "type": "string", + "default": "10Mi" }, - "useTrash": { - "description": "Send files to the trash instead of deleting permanently.", - "type": "boolean", - "default": true + "description": { + "description": "Description of the remote.", + "type": "string" }, - "v2DownloadMinSize": { - "description": "If Object's are greater, use drive v2 API to download.", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "default": "off" + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot" + }, + "password": { + "description": "Password.", + "type": "string" + }, + "username": { + "description": "Username.", + "type": "string" } } }, - "storage.dropboxConfig": { + "storage.pcloudConfig": { "type": "object", "properties": { "authUrl": { "description": "Auth server URL.", "type": "string" }, - "batchCommitTimeout": { - "description": "Max time to wait for a batch to finish committing", - "type": "string", - "default": "10m0s" - }, - "batchMode": { - "description": "Upload file batching sync|async|off.", - "type": "string", - "default": "sync" - }, - "batchSize": { - "description": "Max number of files in upload batch.", - "type": "integer", - "default": 0 - }, - "batchTimeout": { - "description": "Max time to allow an idle upload batch before uploading.", - "type": "string", - "default": "0s" - }, - "chunkSize": { - "description": "Upload chunk size (\u003c 150Mi).", - "type": "string", - "default": "48Mi" - }, "clientId": { "description": "OAuth Client Id.", "type": "string" @@ -9542,24 +12013,29 @@ const docTemplate = `{ "description": "OAuth Client Secret.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot" + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "impersonate": { - "description": "Impersonate this user when using a business account.", - "type": "string" + "hostname": { + "description": "Hostname to connect to.", + "type": "string", + "default": "api.pcloud.com", + "example": "api.pcloud.com" }, - "sharedFiles": { - "description": "Instructs rclone to work on individual shared files.", - "type": "boolean", - "default": false + "password": { + "description": "Your pcloud password.", + "type": "string" }, - "sharedFolders": { - "description": "Instructs rclone to work on shared folders.", - "type": "boolean", - "default": false + "rootFolderId": { + "description": "Fill in for rclone to use a non root folder as its starting point.", + "type": "string", + "default": "d0" }, "token": { "description": "OAuth Access Token as a JSON blob.", @@ -9568,1535 +12044,1829 @@ const docTemplate = `{ "tokenUrl": { "description": "Token server url.", "type": "string" + }, + "username": { + "description": "Your pcloud 
username.", + "type": "string" } } }, - "storage.fichierConfig": { + "storage.premiumizemeConfig": { "type": "object", "properties": { "apiKey": { - "description": "Your API Key, get it from https://1fichier.com/console/params.pl.", + "description": "API Key.", + "type": "string" + }, + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot" - }, - "filePassword": { - "description": "If you want to download a shared file that is password protected, add this parameter.", - "type": "string" + "default": "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "folderPassword": { - "description": "If you want to list the files in a shared folder that is password protected, add this parameter.", + "token": { + "description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "sharedFolder": { - "description": "If you want to download a shared folder, add this parameter.", + "tokenUrl": { + "description": "Token server url.", "type": "string" } } }, - "storage.filefabricConfig": { + "storage.putioConfig": { "type": "object", "properties": { - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,Del,Ctl,InvalidUtf8,Dot" - }, - "permanentToken": { - "description": "Permanent Authentication Token.", + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "rootFolderId": { - "description": "ID of the root folder.", + "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "token": { - "description": "Session Token.", + "clientSecret": { + "description": "OAuth Client Secret.", "type": "string" }, - "tokenExpiry": { - "description": "Token expiry time.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "url": { - "description": "URL of the Enterprise File Fabric to connect to.", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "example": "https://storagemadeeasy.com" + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "version": { - "description": "Version read from the file fabric.", + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", "type": "string" } } }, - "storage.ftpConfig": { + "storage.qingstorConfig": { "type": "object", "properties": { - "askPassword": { - "description": "Allow asking for FTP password when needed.", - "type": "boolean", - "default": false + "accessKeyId": { + "description": "QingStor Access Key ID.", + "type": "string" }, - "closeTimeout": { - "description": "Maximum time to wait for a response to close.", + "chunkSize": { + "description": "Chunk size to use for uploading.", "type": "string", - "default": "1m0s" + "default": "4Mi" }, - "concurrency": { - "description": "Maximum number of FTP simultaneous connections, 0 for unlimited.", + "connectionRetries": { + "description": "Number of connection retries.", "type": "integer", - "default": 0 - }, - "disableEpsv": { - "description": "Disable using EPSV even if server advertises support.", - 
"type": "boolean", - "default": false - }, - "disableMlsd": { - "description": "Disable using MLSD even if server advertises support.", - "type": "boolean", - "default": false - }, - "disableTls13": { - "description": "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", - "type": "boolean", - "default": false + "default": 3 }, - "disableUtf8": { - "description": "Disable using UTF-8 even if server advertises support.", - "type": "boolean", - "default": false + "description": { + "description": "Description of the remote.", + "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,Del,Ctl,RightSpace,Dot", - "example": "Asterisk,Ctl,Dot,Slash" - }, - "explicitTls": { - "description": "Use Explicit FTPS (FTP over TLS).", - "type": "boolean", - "default": false - }, - "forceListHidden": { - "description": "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.", - "type": "boolean", - "default": false + "default": "Slash,Ctl,InvalidUtf8" }, - "host": { - "description": "FTP host to connect to.", + "endpoint": { + "description": "Enter an endpoint URL to connection QingStor API.", "type": "string" }, - "idleTimeout": { - "description": "Max time before closing idle connections.", - "type": "string", - "default": "1m0s" - }, - "noCheckCertificate": { - "description": "Do not verify the TLS certificate of the server.", + "envAuth": { + "description": "Get QingStor credentials from runtime.", "type": "boolean", - "default": false + "default": false, + "example": false }, - "pass": { - "description": "FTP password.", + "secretAccessKey": { + "description": "QingStor Secret Access Key (password).", "type": "string" }, - "port": { - "description": "FTP port number.", + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", "type": "integer", - "default": 21 + "default": 1 }, - "shutTimeout": { - "description": "Maximum time to wait for data connection closing status.", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "default": "1m0s" - }, - "tls": { - "description": "Use Implicit FTPS (FTP over TLS).", - "type": "boolean", - "default": false - }, - "tlsCacheSize": { - "description": "Size of TLS session cache for all control and data connections.", - "type": "integer", - "default": 32 + "default": "200Mi" }, - "user": { - "description": "FTP username.", + "zone": { + "description": "Zone to connect to.", "type": "string", - "default": "$USER" - }, - "writingMdtm": { - "description": "Use MDTM to set modification time (VsFtpd quirk)", - "type": "boolean", - "default": false + "example": "pek3a" } } }, - "storage.gcsConfig": { + "storage.s3AWSConfig": { "type": "object", "properties": { - "anonymous": { - "description": "Access public buckets and objects without credentials.", - "type": "boolean", - "default": false + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "authUrl": { - "description": "Auth server URL.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, "bucketAcl": { - "description": "Access Control List for new buckets.", + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "authenticatedRead" + "example": "private" }, - "bucketPolicyOnly": { - "description": "Access checks should use bucket-level IAM policies.", + "chunkSize": { + "description": "Chunk size to use for 
uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, - "clientId": { - "description": "OAuth Client Id.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "decompress": { - "description": "If set this will decompress gzip encoded objects.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,CrLf,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for the service.", + "description": "Endpoint for S3 API.", "type": "string" }, "envAuth": { - "description": "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", "default": false, "example": false }, - "location": { - "description": "Location for the newly created buckets.", - "type": "string", - "example": "" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", "type": "boolean", "default": false }, - "objectAcl": { - "description": "Access Control List for new objects.", - "type": "string", - "example": "authenticatedRead" - }, - "projectNumber": { - "description": "Project number.", - "type": "string" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "serviceAccountCredentials": { - "description": "Service Account Credentials JSON blob.", - "type": "string" + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" }, - "serviceAccountFile": { - "description": "Service Account Credentials JSON file path.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "storageClass": { - "description": "The storage class to use when storing objects in Google Cloud Storage.", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string", "example": "" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" - }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" - } - } - }, - 
"storage.gphotosConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "Slash,CrLf,InvalidUtf8,Dot" + "default": "unset" }, - "includeArchived": { - "description": "Also view and download archived media.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "readOnly": { - "description": "Set to make the Google Photos backend read only.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "readSize": { - "description": "Set to read the size of media items.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, - "startYear": { - "description": "Year limits the photos to be downloaded to those which are uploaded after the given year.", - "type": "integer", - "default": 2000 + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" - } - } - }, - "storage.hdfsConfig": { - "type": "object", - "properties": { - "dataTransferProtection": { - "description": "Kerberos data transfer protection: authentication|integrity|privacy.", + "region": { + "description": "Region to connect to.", "type": "string", - "example": "privacy" + "example": "us-east-1" }, - "encoding": { - "description": "The encoding for the backend.", + "requesterPays": { + "description": "Enables requester pays option when interacting with S3 bucket.", + "type": "boolean", + "default": false + }, + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "Slash,Colon,Del,Ctl,InvalidUtf8,Dot" + "default": "Off" }, - "namenode": { - "description": "Hadoop name node and port.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "servicePrincipalName": { - "description": "Kerberos service principal name for the namenode.", + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "username": { - "description": "Hadoop user name.", - "type": "string", - "example": "root" - } - } - }, - "storage.hidriveConfig": { - "type": "object", - "properties": { - "authUrl": { - 
"description": "Auth server URL.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "chunkSize": { - "description": "Chunksize for chunked uploads.", + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", - "default": "48Mi" + "example": "" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "clientSecret": { - "description": "OAuth Client Secret.", + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" + }, + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "If using KMS ID you must provide the ARN of Key.", + "type": "string", + "example": "" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string", + "example": "" + }, + "stsEndpoint": { + "description": "Endpoint for STS (deprecated).", "type": "string" }, - "disableFetchingMemberCount": { - "description": "Do not fetch number of objects in directories unless it is absolutely necessary.", + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + }, + "useAccelerateEndpoint": { + "description": "If true use the AWS S3 accelerated endpoint.", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", "type": "string", - "default": "Slash,Dot" + "default": "unset" }, - "endpoint": { - "description": "Endpoint for the service.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "https://api.hidrive.strato.com/2.1" + "default": "unset" }, - "rootPrefix": { - "description": "The root/parent folder for all paths.", - "type": "string", - "default": "/", - "example": "/" + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "scopeAccess": { - "description": "Access permissions that rclone should use when requesting access from HiDrive.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "default": "rw", - "example": "rw" + "default": "unset" }, - "scopeRole": { - "description": "User-level that rclone should use when requesting access from HiDrive.", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "default": "user", - "example": "user" + "default": "unset" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": 
"boolean", + "default": false }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for chunked uploads.", - "type": "integer", - "default": 4 + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff/Threshold for chunked uploads.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "96Mi" - } - } - }, - "storage.httpConfig": { - "type": "object", - "properties": { - "headers": { - "description": "Set HTTP headers for all transactions.", - "type": "string" + "default": "off" }, - "noHead": { - "description": "Don't use HEAD requests.", + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "noSlash": { - "description": "Set this if the site doesn't end directories with /.", + "versions": { + "description": "Include old versions in directory listings.", "type": "boolean", "default": false - }, - "url": { - "description": "URL of HTTP host to connect to.", - "type": "string" } } }, - "storage.internetarchiveConfig": { + "storage.s3AlibabaConfig": { "type": "object", "properties": { "accessKeyId": { - "description": "IAS3 Access Key.", + "description": "AWS Access Key ID.", "type": "string" }, - "disableChecksum": { - "description": "Don't ask the server to test against MD5 checksum calculated by rclone.", - "type": "boolean", - "default": true + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "default": "Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot" + "example": "private" }, - "endpoint": { - "description": "IAS3 Endpoint.", + "chunkSize": { + "description": "Chunk size to use for uploading.", "type": "string", - "default": "https://s3.us.archive.org" + "default": "5Mi" }, - "frontEndpoint": { - "description": "Host of InternetArchive Frontend.", + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", "type": "string", - "default": "https://archive.org" + "default": "4.656Gi" }, - "secretAccessKey": { - "description": "IAS3 Secret Key (password).", - "type": "string" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false }, - "waitArchive": { - "description": "Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish.", - "type": "string", - "default": "0s" - } - } - }, - "storage.jottacloudConfig": { - "type": "object", - "properties": { - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot" + "description": { + "description": "Description of the remote.", + "type": "string" }, - "hardDelete": { - "description": "Delete files permanently rather than putting them into the trash.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "md5MemoryLimit": { - 
"description": "Files bigger than this will be cached on disk to calculate the MD5 if required.", - "type": "string", - "default": "10Mi" - }, - "noVersions": { - "description": "Avoid server side versioning by deleting files and recreating files instead of overwriting them.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "trashedOnly": { - "description": "Only show files that are in the trash.", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", "type": "boolean", "default": false }, - "uploadResumeLimit": { - "description": "Files bigger than this can be resumed if the upload fail's.", - "type": "string", - "default": "10Mi" - } - } - }, - "storage.koofrDigistorageConfig": { - "type": "object", - "properties": { + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "mountid": { - "description": "Mount ID of the mount to use.", - "type": "string" + "endpoint": { + "description": "Endpoint for OSS API.", + "type": "string", + "example": "oss-accelerate.aliyuncs.com" }, - "password": { - "description": "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).", - "type": "string" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "setmtime": { - "description": "Does the backend support setting modification time.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", "type": "boolean", "default": true }, - "user": { - "description": "Your user name.", - "type": "string" - } - } - }, - "storage.koofrKoofrConfig": { - "type": "object", - "properties": { - "encoding": { - "description": "The encoding for the backend.", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "unset" }, - "mountid": { - "description": "Mount ID of the mount to use.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "password": { - "description": "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).", - "type": "string" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "setmtime": { - "description": "Does the backend support setting modification time.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", - "default": true + "default": false }, - "user": { - "description": "Your user name.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.koofrOtherConfig": { - "type": "object", - "properties": { - "encoding": { - "description": "The encoding for the backend.", + }, + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Off" }, - "endpoint": { - "description": "The Koofr API endpoint to use.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "mountid": { - "description": "Mount ID of the mount to use.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "password": { - "description": "Your password for rclone (generate one at your service's settings page).", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "setmtime": { - "description": "Does the backend support setting modification time.", - "type": "boolean", - "default": true + "storageClass": { + "description": "The storage class to use when storing new objects in OSS.", + "type": "string", + "example": "" }, - "user": { - "description": "Your user name.", - "type": "string" - } - } - }, - "storage.localConfig": { - "type": "object", - "properties": { - "caseInsensitive": { - "description": "Force the filesystem to report itself as case insensitive.", - "type": "boolean", - "default": false + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "caseSensitive": { - "description": "Force the filesystem to report itself as case sensitive.", - "type": "boolean", - "default": false + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "copyLinks": { - "description": "Follow symlinks and copy the pointed to item.", - "type": "boolean", - "default": false + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" }, - "encoding": { - "description": "The encoding for the backend.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "Slash,Dot" + "default": "unset" }, - "links": { - "description": "Translate symlinks to/from regular files with a '.rclonelink' extension.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "noCheckUpdated": { - "description": "Don't check to see if the files change during 
upload.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "noPreallocate": { - "description": "Disable preallocation of disk space for transferred files.", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "noSetModtime": { - "description": "Disable setting modtime.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "noSparse": { - "description": "Disable sparse files for multi-thread downloads.", + "versions": { + "description": "Include old versions in directory listings.", "type": "boolean", "default": false + } + } + }, + "storage.s3ArvanCloudConfig": { + "type": "object", + "properties": { + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "nounc": { - "description": "Disable UNC (long path names) conversion on Windows.", - "type": "boolean", - "default": false, - "example": true + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "oneFileSystem": { - "description": "Don't cross filesystem boundaries (unix/macOS only).", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, - "skipLinks": { - "description": "Don't warn about skipped symlinks.", + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "unicodeNormalization": { - "description": "Apply unicode NFC normalization to paths and filenames.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "zeroSizeLinks": { - "description": "Assume the Stat size of links is zero (and read them instead) (deprecated).", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", "type": "boolean", "default": false - } - } - }, - "storage.mailruConfig": { - "type": "object", - "properties": { - "checkHash": { - "description": "What should copy do if file checksum is mismatched or invalid.", - "type": "boolean", - "default": true, - "example": true + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - 
"default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "pass": { - "description": "Password.", - "type": "string" + "endpoint": { + "description": "Endpoint for Arvan Cloud Object Storage (AOS) API.", + "type": "string", + "example": "s3.ir-thr-at1.arvanstorage.ir" }, - "quirks": { - "description": "Comma separated list of internal maintenance flags.", - "type": "string" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "speedupEnable": { - "description": "Skip full upload if there is another file with same data hash.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", "type": "boolean", - "default": true, - "example": true + "default": true }, - "speedupFilePatterns": { - "description": "Comma separated list of file name patterns eligible for speedup (put by hash).", - "type": "string", - "default": "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf", - "example": "" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "speedupMaxDisk": { - "description": "This option allows you to disable speedup (put by hash) for large files.", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "3Gi", - "example": "0" + "default": "unset" }, - "speedupMaxMemory": { - "description": "Files larger than the size given below will always be hashed on disk.", + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "locationConstraint": { + "description": "Location constraint - must match endpoint.", "type": "string", - "default": "32Mi", - "example": "0" + "example": "ir-thr-at1" }, - "user": { - "description": "User name (usually email).", - "type": "string" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "userAgent": { - "description": "HTTP user agent used internally by client.", - "type": "string" - } - } - }, - "storage.megaConfig": { - "type": "object", - "properties": { - "debug": { - "description": "Output more debug from Mega.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" + "default": "unset" }, - "hardDelete": { - "description": "Delete files permanently rather than putting them into the trash.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "pass": { - "description": "Password.", - "type": "string" + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "useHttps": { - "description": "Use HTTPS for transfers.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, - "user": { - "description": "User name.", - "type": "string" - } - } - }, - "storage.netstorageConfig": { - "type": "object", - "properties": { - "account": { - "description": "Set the NetStorage account name", - "type": "string" + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false }, - "host": { - "description": "Domain+path of NetStorage host to connect to.", + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "protocol": { - "description": "Select between HTTP or HTTPS protocol.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "https", - "example": "http" + "default": "Off" }, - "secret": { - "description": "Set the NetStorage account secret/G2O key for authentication.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.onedriveConfig": { - "type": "object", - "properties": { - "accessScopes": { - "description": "Set scopes to be requested by rclone.", - "type": "string", - "default": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access", - "example": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" }, - "authUrl": { - "description": "Auth server URL.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "chunkSize": { - "description": "Chunk size to upload files with - must be multiple of 320k (327,680 bytes).", - "type": "string", - "default": "10Mi" - }, - "clientId": { - "description": "OAuth Client Id.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "storageClass": { + "description": "The storage class to use when storing new objects in ArvanCloud.", + "type": "string", + "example": "STANDARD" }, - "disableSitePermission": { - "description": "Disable the request for Sites.Read.All permission.", - "type": "boolean", - "default": false + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "driveId": { - "description": "The ID of the drive to use.", - "type": "string" + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "driveType": { - "description": "The type of the drive (personal | business | documentLibrary).", - "type": "string" + 
"useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" }, - "encoding": { - "description": "The encoding for the backend.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot" + "default": "unset" }, - "exposeOnenoteFiles": { - "description": "Set to make OneNote files show up in directory listings.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "hashType": { - "description": "Specify the hash in use for the backend.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "default": "auto", - "example": "auto" - }, - "linkPassword": { - "description": "Set the password for links created by the link command.", - "type": "string" + "default": "unset" }, - "linkScope": { - "description": "Set the scope of the links created by the link command.", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "default": "anonymous", - "example": "anonymous" + "default": "unset" }, - "linkType": { - "description": "Set the type of the links created by the link command.", - "type": "string", - "default": "view", - "example": "view" + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "listChunk": { - "description": "Size of listing chunk.", - "type": "integer", - "default": 1000 + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "noVersions": { - "description": "Remove all versions on modifying operations.", + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "region": { - "description": "Choose national cloud region for OneDrive.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "global", - "example": "global" - }, - "rootFolderId": { - "description": "ID of the root folder.", - "type": "string" + "default": "off" }, - "serverSideAcrossConfigs": { - "description": "Allow server-side operations (e.g. 
copy) to work across different onedrive configs.", + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" - }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosEnv_authConfig": { + "storage.s3CephConfig": { "type": "object", "properties": { + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" + }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" + }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Object storage API.", + "description": "Endpoint for S3 API.", "type": "string" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", - "default": false + "default": false, + "example": false }, - "namespace": { - "description": "Object storage namespace", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" + }, + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string" }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + 
"memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" + }, "noCheckBucket": { "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" + }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" + }, + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, + "sessionToken": { + "description": "An AWS session token.", + "type": "string" + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", "example": "" }, "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", "type": "string", "example": "" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", "type": "string", "example": "" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", "type": "string", "example": "" }, "sseKmsKeyId": { - "description": "if using using your own master key in vault, this header specifies the", + "description": "If using KMS ID you must provide the ARN of Key.", "type": "string", "example": "" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", - "type": "string", - "default": "Standard", - "example": "Standard" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 10 + "default": 4 }, "uploadCutoff": { "description": "Cutoff for switching to chunked upload.", "type": "string", "default": "200Mi" + }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false + }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosInstance_principal_authConfig": { + "storage.s3ChinaMobileConfig": { "type": "object", "properties": { + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" + }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" + }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" - }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for 
manual recovery.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "example": "" + "default": "Slash,InvalidUtf8,Dot" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "endpoint": { + "description": "Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API.", "type": "string", - "example": "" + "example": "eos-wuxi-1.cmecloud.cn" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", - "type": "string", - "example": "" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", - "type": "string", - "example": "" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "sseKmsKeyId": { - "description": "if using using your own master key in vault, this header specifies the", - "type": "string", - "example": "" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", "type": "integer", - "default": 10 + "default": 0 }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", - "type": "string", - "default": "200Mi" - } - } - }, - "storage.oosNo_authConfig": { - "type": "object", - "properties": { - "chunkSize": { - "description": "Chunk size to use for uploading.", + "locationConstraint": { + "description": "Location constraint - must match endpoint.", "type": "string", - "default": "5Mi" + "example": "wuxi1" }, - "copyCutoff": { - "description": "Cutoff for switching to multipart copy.", - "type": "string", - "default": "4.656Gi" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "copyTimeout": { - "description": "Timeout for copy.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" + "default": "unset" }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" + }, + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, + "sessionToken": { + "description": "An AWS session token.", + "type": "string" + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" 
}, "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", "example": "" }, "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", "type": "string", "example": "" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", "type": "string", "example": "" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", "type": "string", "example": "" }, - "sseKmsKeyId": { - "description": "if using using your own master key in vault, this header specifies the", + "storageClass": { + "description": "The storage class to use when storing new objects in ChinaMobile.", "type": "string", "example": "" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", - "type": "string", - "default": "Standard", - "example": "Standard" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 10 + "default": 4 }, "uploadCutoff": { "description": "Cutoff for switching to chunked upload.", "type": "string", "default": "200Mi" + }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false + }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in 
directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosResource_principal_authConfig": { + "storage.s3CloudflareConfig": { "type": "object", "properties": { + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" + }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" + }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Object storage API.", + "description": "Endpoint for S3 API.", "type": "string" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" + }, + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, "noCheckBucket": { "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" + }, "region": { - "description": "Object storage Region", + "description": "Region to connect to.", + "type": "string", + "example": "auto" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "sessionToken": { + "description": "An AWS session token.", + "type": "string" + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "example": "" + "default": "200Mi" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "" + "default": "unset" }, - "sseKmsKeyId": { - "description": "if using using your own master key in vault, this header specifies the", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "" + "default": "unset" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false + }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 10 + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "200Mi" + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosUser_principal_authConfig": { + "storage.s3DigitalOceanConfig": { "type": "object", "properties": { - "chunkSize": { - "description": "Chunk size to use for uploading.", - "type": "string", - "default": "5Mi" + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "compartment": { - "description": "Object storage compartment OCID", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, - "configFile": { - "description": "Path to OCI config file", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "default": "~/.oci/config", - "example": "~/.oci/config" + "example": "private" }, - "configProfile": { - "description": "Profile name inside the oci config file", + "chunkSize": { + "description": "Chunk size to use for uploading.", "type": "string", - "default": "Default", - "example": "Default" + "default": "5Mi" }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" - }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "disableHttp2": { + "description": "Disable usage of http2 
for S3 backends.", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "example": "" + "default": "Slash,InvalidUtf8,Dot" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "" + "example": "syd1.digitaloceanspaces.com" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", - "type": "string", - "example": "" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", - "type": "string", - "example": "" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "sseKmsKeyId": { - "description": "if using using your own master key in vault, this header specifies the", - "type": "string", - "example": "" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", "type": "integer", - "default": 10 + "default": 0 }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", - "type": "string", - "default": "200Mi" - } - } - }, - "storage.opendriveConfig": { - "type": "object", - "properties": { - "chunkSize": { - "description": "Files will be uploaded in chunks this size.", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", - "default": "10Mi" + "default": "1m0s" }, - "encoding": { - "description": "The encoding for the backend.", + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot" + "default": "unset" }, - "password": { - "description": "Password.", - "type": "string" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "username": { - "description": "Username.", - "type": "string" - } - } - }, - "storage.pcloudConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "clientSecret": { - "description": "OAuth Client Secret.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "region": { + "description": "Region to connect to.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "example": "" }, - "hostname": { - "description": "Hostname to connect to.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "api.pcloud.com", - "example": "api.pcloud.com" + "default": "Off" }, - "password": { - "description": "Your pcloud password.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "rootFolderId": { - "description": "Fill in for rclone to use a non root folder as its starting point.", - "type": "string", - "default": "d0" - }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "username": { - "description": "Your pcloud username.", - "type": "string" - } - } - }, - "storage.premiumizemeConfig": { - "type": "object", - "properties": { - "apiKey": { - "description": "API Key.", - "type": "string" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "encoding": { - "description": "The encoding for the backend.", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "default": "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot" - } - } - }, - "storage.putioConfig": { - "type": "object", - "properties": { - "encoding": { - "description": "The encoding for the backend.", + "default": "200Mi" + }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" - } - } - }, - "storage.qingstorConfig": { - "type": "object", - "properties": { - "accessKeyId": { - "description": "QingStor Access Key ID.", - "type": 
"string" + "default": "unset" }, - "chunkSize": { - "description": "Chunk size to use for uploading.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "4Mi" + "default": "unset" }, - "connectionRetries": { - "description": "Number of connection retries.", - "type": "integer", - "default": 3 + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "default": "Slash,Ctl,InvalidUtf8" + "default": "unset" }, - "endpoint": { - "description": "Enter an endpoint URL to connection QingStor API.", - "type": "string" + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "envAuth": { - "description": "Get QingStor credentials from runtime.", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", - "default": false, - "example": false + "default": false }, - "secretAccessKey": { - "description": "QingStor Secret Access Key (password).", - "type": "string" + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 1 + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "200Mi" + "default": "off" }, - "zone": { - "description": "Zone to connect to.", - "type": "string", - "example": "pek3a" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.s3AWSConfig": { + "storage.s3DreamhostConfig": { "type": "object", "properties": { "accessKeyId": { @@ -11127,6 +13897,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -11148,7 +13927,8 @@ const docTemplate = `{ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "objects-us-east-1.dream.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -11161,11 +13941,6 @@ const docTemplate = `{ "type": "boolean", "default": true }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", - "type": "boolean", - "default": false - }, "listChunk": { "description": "Size of listing 
chunk (response list for each ListObject S3 request).", "type": "integer", @@ -11183,8 +13958,7 @@ const docTemplate = `{ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string", - "example": "" + "type": "string" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -11192,12 +13966,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -11233,22 +14007,17 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "us-east-1" + "example": "" }, - "requesterPays": { - "description": "Enables requester pays option when interacting with S3 bucket.", - "type": "boolean", - "default": false + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -11257,42 +14026,8 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", - "type": "string", - "example": "" - }, - "storageClass": { - "description": "The storage class to use when storing new objects in S3.", - "type": "string", - "example": "" - }, - "stsEndpoint": { - "description": "Endpoint for STS.", - "type": "string" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -11301,8 +14036,18 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, - "useAccelerateEndpoint": { - "description": "If true use the AWS S3 accelerated endpoint.", + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If 
true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, @@ -11311,11 +14056,21 @@ const docTemplate = `{ "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -11326,6 +14081,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -11333,7 +14093,7 @@ const docTemplate = `{ } } }, - "storage.s3AlibabaConfig": { + "storage.s3GCSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -11364,6 +14124,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -11384,9 +14153,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for OSS API.", + "description": "Endpoint for Google Cloud Storage.", "type": "string", - "example": "oss-accelerate.aliyuncs.com" + "example": "https://storage.googleapis.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -11414,18 +14183,22 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -11458,6 +14231,16 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -11470,13 +14253,8 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in OSS.", - "type": "string", - "example": "" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -11485,16 +14263,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -11505,6 +14308,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -11512,7 +14320,7 @@ const docTemplate = `{ } } }, - "storage.s3ArvanCloudConfig": { + "storage.s3HuaweiOBSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -11543,6 +14351,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -11563,9 +14380,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Arvan Cloud Object Storage (AOS) API.", + "description": "Endpoint for OBS API.", "type": "string", - "example": "s3.ir-thr-at1.arvanstorage.com" + "example": "obs.af-south-1.myhuaweicloud.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -11593,23 +14410,18 @@ const 
docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must match endpoint.", - "type": "string", - "example": "ir-thr-at1" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -11642,6 +14454,16 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint.", + "type": "string", + "example": "af-south-1" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -11654,13 +14476,8 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in ArvanCloud.", - "type": "string", - "example": "STANDARD" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -11669,16 +14486,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -11689,6 +14531,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -11696,7 +14543,7 @@ const docTemplate = `{ } } }, - "storage.s3CephConfig": { + "storage.s3IBMCOSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -11705,7 +14552,8 @@ const docTemplate = `{ }, "acl": { "description": "Canned ACL used when 
creating buckets and storing or copying objects.", - "type": "string" + "type": "string", + "example": "private" }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", @@ -11727,6 +14575,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -11747,8 +14604,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", - "type": "string" + "description": "Endpoint for IBM COS S3 API.", + "type": "string", + "example": "s3.us.cloud-object-storage.appdomain.cloud" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -11777,8 +14635,9 @@ const docTemplate = `{ "default": 0 }, "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" + "description": "Location constraint - must match endpoint when using IBM Cloud Public.", + "type": "string", + "example": "us-standard" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -11786,12 +14645,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -11821,58 +14680,33 @@ const docTemplate = `{ "default": false }, "profile": { - "description": "Profile to use in the shared credentials file.", - "type": "string" - }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" - }, - "secretAccessKey": { - "description": "AWS Secret Access Key (password).", - "type": "string" - }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sessionToken": { - "description": "An AWS session token.", - "type": "string" - }, - "sharedCredentialsFile": { - "description": "Path to the shared credentials file.", + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", + "region": { + "description": "Region to connect to.", "type": "string", "example": "" }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", - "type": "string", - "example": "" + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -11881,16 +14715,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -11901,6 +14760,11 @@ const docTemplate = `{ "type": "string", 
"default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -11908,7 +14772,7 @@ const docTemplate = `{ } } }, - "storage.s3ChinaMobileConfig": { + "storage.s3IDriveConfig": { "type": "object", "properties": { "accessKeyId": { @@ -11939,6 +14803,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -11958,11 +14831,6 @@ const docTemplate = `{ "type": "string", "default": "Slash,InvalidUtf8,Dot" }, - "endpoint": { - "description": "Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API.", - "type": "string", - "example": "eos-wuxi-1.cmecloud.cn" - }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", @@ -11989,23 +14857,18 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must match endpoint.", - "type": "string", - "example": "wuxi1" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -12038,15 +14901,15 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -12055,33 +14918,8 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "storageClass": { - "description": "The storage class to use when storing new objects in ChinaMobile.", - "type": "string", - "example": "" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12090,16 +14928,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12110,6 +14973,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -12117,13 +14985,17 @@ const docTemplate = `{ } } }, - "storage.s3CloudflareConfig": { + "storage.s3IONOSConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, + "acl": 
{ + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -12144,6 +15016,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -12164,8 +15045,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", - "type": "string" + "description": "Endpoint for IONOS S3 Object Storage.", + "type": "string", + "example": "s3-eu-central-1.ionoscloud.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12199,12 +15081,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -12238,9 +15120,14 @@ const docTemplate = `{ "type": "string" }, "region": { - "description": "Region to connect to.", + "description": "Region where your bucket will be created and your data stored.", "type": "string", - "example": "auto" + "example": "de" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -12255,7 +15142,7 @@ const docTemplate = `{ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12264,16 +15151,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12284,6 +15196,11 @@ const docTemplate = `{ "type": "string", 
"default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -12291,7 +15208,7 @@ const docTemplate = `{ } } }, - "storage.s3DigitalOceanConfig": { + "storage.s3LeviiaConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12322,6 +15239,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -12343,8 +15269,7 @@ const docTemplate = `{ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string", - "example": "syd1.digitaloceanspaces.com" + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12372,22 +15297,18 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -12425,6 +15346,11 @@ const docTemplate = `{ "type": "string", "example": "" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -12438,7 +15364,7 @@ const docTemplate = `{ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12447,16 +15373,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12467,6 +15418,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -12474,7 +15430,7 @@ const docTemplate = `{ } } }, - "storage.s3DreamhostConfig": { + "storage.s3LiaraConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12505,6 +15461,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -12525,9 +15490,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", + "description": "Endpoint for Liara Object Storage API.", "type": "string", - "example": "objects-us-east-1.dream.io" + "example": "storage.iran.liara.space" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12555,22 +15520,18 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer 
pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -12603,10 +15564,10 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -12620,8 +15581,13 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "storageClass": { + "description": "The storage class to use when storing new objects in Liara", + "type": "string", + "example": "STANDARD" + }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12630,16 +15596,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12650,6 +15641,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -12657,7 +15653,7 @@ const docTemplate = `{ } } }, - "storage.s3HuaweiOBSConfig": { + "storage.s3LinodeConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12688,6 +15684,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -12708,9 +15713,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for OBS API.", + "description": "Endpoint for Linode Object Storage API.", 
"type": "string", - "example": "obs.af-south-1.myhuaweicloud.com" + "example": "us-southeast-1.linodeobjects.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12744,12 +15749,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -12782,10 +15787,10 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "af-south-1" + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -12800,7 +15805,7 @@ const docTemplate = `{ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12809,16 +15814,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12829,6 +15859,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -12836,7 +15871,7 @@ const docTemplate = `{ } } }, - "storage.s3IBMCOSConfig": { + "storage.s3LyveCloudConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12845,8 +15880,7 @@ const docTemplate = `{ }, "acl": { "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string", - "example": "private" + "type": "string" }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", @@ -12868,6 +15902,15 @@ const docTemplate = `{ "type": "boolean", 
"default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -12888,9 +15931,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for IBM COS S3 API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.us.cloud-object-storage.appdomain.cloud" + "example": "s3.us-east-1.lyvecloud.seagate.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12919,9 +15962,8 @@ const docTemplate = `{ "default": 0 }, "locationConstraint": { - "description": "Location constraint - must match endpoint when using IBM Cloud Public.", - "type": "string", - "example": "us-standard" + "description": "Location constraint - must be set to match the Region.", + "type": "string" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -12929,12 +15971,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -12972,6 +16014,11 @@ const docTemplate = `{ "type": "string", "example": "" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -12985,7 +16032,7 @@ const docTemplate = `{ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12994,16 +16041,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13014,6 +16086,11 @@ const docTemplate = `{ "type": "string", 
"default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13021,7 +16098,7 @@ const docTemplate = `{ } } }, - "storage.s3IDriveConfig": { + "storage.s3MagaluConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13052,6 +16129,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13071,6 +16157,11 @@ const docTemplate = `{ "type": "string", "default": "Slash,InvalidUtf8,Dot" }, + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string", + "example": "br-se1.magaluobjects.com" + }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", @@ -13103,12 +16194,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -13141,6 +16232,11 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -13153,8 +16249,13 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "storageClass": { + "description": "The storage class to use when storing new objects in Magalu.", + "type": "string", + "example": "STANDARD" + }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -13163,16 +16264,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + 
"description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13183,6 +16309,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13190,7 +16321,7 @@ const docTemplate = `{ } } }, - "storage.s3IONOSConfig": { + "storage.s3MinioConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13221,6 +16352,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13241,9 +16381,8 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for IONOS S3 Object Storage.", - "type": "string", - "example": "s3-eu-central-1.ionoscloud.com" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13271,18 +16410,22 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -13316,14 +16459,24 @@ const docTemplate = `{ "type": "string" }, "region": { - "description": "Region where your bucket will be created and your data stored.", + "description": "Region to connect to.", "type": "string", - "example": "de" + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" }, + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -13332,8 +16485,33 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" + }, + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" + }, + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "If using KMS ID you must provide the ARN of Key.", + "type": "string", + "example": "" + }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -13342,16 +16520,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13362,6 +16565,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13369,7 +16577,7 @@ const docTemplate = `{ } } }, - "storage.s3LiaraConfig": { + "storage.s3NeteaseConfig": { "type": 
"object", "properties": { "accessKeyId": { @@ -13400,6 +16608,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13420,9 +16637,8 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Liara Object Storage API.", - "type": "string", - "example": "storage.iran.liara.space" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13450,18 +16666,22 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -13494,6 +16714,16 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -13506,13 +16736,8 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in Liara", - "type": "string", - "example": "STANDARD" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -13521,16 +16746,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + 
"useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13541,6 +16791,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13548,7 +16803,7 @@ const docTemplate = `{ } } }, - "storage.s3LyveCloudConfig": { + "storage.s3OtherConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13579,6 +16834,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13600,8 +16864,7 @@ const docTemplate = `{ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string", - "example": "s3.us-east-1.lyvecloud.seagate.com" + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13639,12 +16902,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -13682,6 +16945,11 @@ const docTemplate = `{ "type": "string", "example": "" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -13695,7 +16963,7 @@ const docTemplate = `{ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -13704,16 +16972,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13724,6 +17017,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13731,7 +17029,7 @@ const docTemplate = `{ } } }, - "storage.s3MinioConfig": { + "storage.s3PetaboxConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13762,6 +17060,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13782,8 +17089,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", - "type": "string" + "description": "Endpoint for Petabox S3 Object Storage.", + "type": "string", + "example": "s3.petabox.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13811,22 +17119,18 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + 
"description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -13860,18 +17164,18 @@ const docTemplate = `{ "type": "string" }, "region": { - "description": "Region to connect to.", + "description": "Region where your bucket will be created and your data stored.", "type": "string", - "example": "" + "example": "us-east-1" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", - "type": "string" - }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" + "type": "string" }, "sessionToken": { "description": "An AWS session token.", @@ -13881,33 +17185,8 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", - "type": "string", - "example": "" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -13916,16 +17195,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13936,6 +17240,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show 
deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13943,7 +17252,7 @@ const docTemplate = `{ } } }, - "storage.s3NeteaseConfig": { + "storage.s3QiniuConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13974,6 +17283,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13994,8 +17312,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", - "type": "string" + "description": "Endpoint for Qiniu Object Storage.", + "type": "string", + "example": "s3-cn-east-1.qiniucs.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14025,7 +17344,8 @@ const docTemplate = `{ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string" + "type": "string", + "example": "cn-east-1" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -14033,12 +17353,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -14074,7 +17394,12 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "cn-east-1" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -14088,8 +17413,13 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "storageClass": { + "description": "The storage class to use when storing new objects in Qiniu.", + "type": "string", + "example": "STANDARD" + }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -14098,16 +17428,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14118,6 +17473,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -14125,7 +17485,7 @@ const docTemplate = `{ } } }, - "storage.s3OtherConfig": { + "storage.s3RackCorpConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14156,6 +17516,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -14176,8 +17545,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", - "type": "string" + "description": "Endpoint for RackCorp Object Storage.", + "type": "string", + "example": "s3.rackcorp.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14206,8 +17576,9 @@ const docTemplate = `{ "default": 0 }, "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": 
"string" + "description": "Location constraint - the location where your bucket will be located and your data stored.", + "type": "string", + "example": "global" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -14215,12 +17586,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -14254,9 +17625,14 @@ const docTemplate = `{ "type": "string" }, "region": { - "description": "Region to connect to.", + "description": "region - the location where your bucket will be created and your data stored.", "type": "string", - "example": "" + "example": "global" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -14271,7 +17647,7 @@ const docTemplate = `{ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -14280,16 +17656,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14300,6 +17701,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -14307,7 +17713,7 @@ const docTemplate = `{ } } }, - "storage.s3QiniuConfig": { + "storage.s3RcloneConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14338,6 +17744,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store 
MD5 checksum with object metadata.", "type": "boolean", @@ -14358,9 +17773,8 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Qiniu Object Storage.", - "type": "string", - "example": "s3-cn-east-1.qiniucs.com" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14390,8 +17804,7 @@ const docTemplate = `{ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string", - "example": "cn-east-1" + "type": "string" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -14399,12 +17812,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -14440,7 +17853,12 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "cn-east-1" + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -14454,13 +17872,8 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in Qiniu.", - "type": "string", - "example": "STANDARD" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -14469,16 +17882,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14489,6 +17927,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in 
directory listings.", "type": "boolean", @@ -14496,7 +17939,7 @@ const docTemplate = `{ } } }, - "storage.s3RackCorpConfig": { + "storage.s3ScalewayConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14527,6 +17970,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -14547,9 +17999,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for RackCorp Object Storage.", + "description": "Endpoint for Scaleway Object Storage.", "type": "string", - "example": "s3.rackcorp.com" + "example": "s3.nl-ams.scw.cloud" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14577,23 +18029,18 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - the location where your bucket will be located and your data stored.", - "type": "string", - "example": "global" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -14627,9 +18074,14 @@ const docTemplate = `{ "type": "string" }, "region": { - "description": "region - the location where your bucket will be created and your data stored.", + "description": "Region to connect to.", "type": "string", - "example": "global" + "example": "nl-ams" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -14643,8 +18095,13 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string", + "example": "" + }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -14653,16 +18110,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14673,6 +18155,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -14680,7 +18167,7 @@ const docTemplate = `{ } } }, - "storage.s3ScalewayConfig": { + "storage.s3SeaweedFSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14711,6 +18198,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -14731,9 +18227,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Scaleway Object Storage.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.nl-ams.scw.cloud" + "example": "localhost:8333" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14761,18 +18257,22 @@ const docTemplate = 
`{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -14808,7 +18308,12 @@ const docTemplate = `{ "region": { "description": "Region to connect to.", "type": "string", - "example": "nl-ams" + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -14822,13 +18327,8 @@ const docTemplate = `{ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in S3.", - "type": "string", - "example": "" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -14837,16 +18337,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14857,6 +18382,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -14864,7 +18394,7 @@ const docTemplate = `{ } } }, - "storage.s3SeaweedFSConfig": { + "storage.s3StackPathConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14895,6 +18425,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, 
"disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -14915,9 +18454,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", + "description": "Endpoint for StackPath Object Storage.", "type": "string", - "example": "localhost:8333" + "example": "s3.us-east-2.stackpathstorage.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14945,22 +18484,18 @@ const docTemplate = `{ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -14998,6 +18533,11 @@ const docTemplate = `{ "type": "string", "example": "" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -15011,7 +18551,7 @@ const docTemplate = `{ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -15020,16 +18560,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15040,6 +18605,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -15047,17 +18617,13 @@ const docTemplate = `{ } } }, - "storage.s3StackPathConfig": { + "storage.s3StorjConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS 
Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -15078,6 +18644,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -15098,9 +18673,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for StackPath Object Storage.", + "description": "Endpoint for Storj Gateway.", "type": "string", - "example": "s3.us-east-2.stackpathstorage.com" + "example": "gateway.storjshare.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15134,12 +18709,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -15172,10 +18747,10 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -15190,7 +18765,7 @@ const docTemplate = `{ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -15199,16 +18774,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15219,6 +18819,11 @@ const docTemplate = `{ "type": 
"string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -15226,7 +18831,7 @@ const docTemplate = `{ } } }, - "storage.s3StorjConfig": { + "storage.s3SynologyConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15253,6 +18858,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -15273,9 +18887,9 @@ const docTemplate = `{ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Storj Gateway.", + "description": "Endpoint for Synology C2 Object Storage API.", "type": "string", - "example": "gateway.storjshare.io" + "example": "eu-001.s3.synologyc2.net" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15303,18 +18917,22 @@ const docTemplate = `{ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -15347,6 +18965,16 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region where your data stored.", + "type": "string", + "example": "eu-001" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -15360,7 +18988,7 @@ const docTemplate = `{ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -15369,16 +18997,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15389,6 +19042,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -15428,6 +19086,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -15484,12 +19151,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -15522,6 +19189,11 @@ const docTemplate = `{ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -15540,7 +19212,7 @@ const docTemplate = `{ "example": "" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -15549,16 +19221,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15569,6 +19266,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -15607,6 +19309,15 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -15667,12 +19378,12 @@ const docTemplate = `{ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -15710,6 +19421,11 @@ const docTemplate = `{ "type": "string", "example": "" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -15723,7 +19439,7 @@ const docTemplate = `{ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -15732,16 +19448,41 @@ const docTemplate = `{ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send ` + "`" + `Accept-Encoding: gzip` + "`" + ` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15752,6 +19493,11 @@ const docTemplate = `{ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -15776,6 +19522,10 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -15826,6 +19576,20 @@ const docTemplate = `{ "type": "integer", "default": 64 }, + "connections": { + "description": "Maximum number of SFTP simultaneous connections, 0 for unlimited.", + "type": "integer", + "default": 0 + }, + "copyIsHardlink": { + "description": "Set to enable server side copies using hardlinks.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "disableConcurrentReads": { "description": "If set don't use concurrent reads.", "type": "boolean", @@ -15845,6 +19609,10 @@ const docTemplate = `{ "description": "SSH host to connect to.", "type": "string" }, + "hostKeyAlgorithms": { + "description": "Space separated list of host key algorithms, ordered by preference.", + "type": "string" + }, "idleTimeout": { "description": "Max time before closing idle connections.", "type": "string", @@ -15928,6 +19696,14 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "socksProxy": { + "description": "Socks 5 proxy host.", + "type": "string" + }, + "ssh": { + "description": "Path and arguments to external ssh binary.", + "type": "string" + }, "subsystem": { "description": 
"Specifies the SSH2 subsystem on the remote host.", "type": "string", @@ -15954,11 +19730,27 @@ const docTemplate = `{ "storage.sharefileConfig": { "type": "object", "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, "chunkSize": { "description": "Upload chunk size.", "type": "string", "default": "64Mi" }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -15973,6 +19765,14 @@ const docTemplate = `{ "type": "string", "example": "" }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, "uploadCutoff": { "description": "Cutoff for switching to multipart upload.", "type": "string", @@ -15992,6 +19792,10 @@ const docTemplate = `{ "type": "string", "default": "http://127.0.0.1:9980" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -16012,6 +19816,10 @@ const docTemplate = `{ "type": "boolean", "default": true }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "domain": { "description": "Domain name for NTLM authentication.", "type": "string", @@ -16062,6 +19870,10 @@ const docTemplate = `{ "accessGrant": { "description": "Access grant.", "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" } } }, @@ -16072,6 +19884,10 @@ const docTemplate = `{ "description": "API key.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "passphrase": { "description": "Encryption passphrase.", "type": "string" @@ -16107,6 +19923,10 @@ const docTemplate = `{ "description": "Sugarsync deleted folder id.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -16165,10 +19985,14 @@ const docTemplate = `{ "default": 0 }, "chunkSize": { - "description": "Above this size files will be chunked into a _segments container.", + "description": "Above this size files will be chunked.", "type": "string", "default": "5Gi" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "domain": { "description": "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)", "type": "string" @@ -16190,6 +20014,11 @@ const docTemplate = `{ "default": false, "example": false }, + "fetchUntilEmptyPage": { + "description": "When paginating, always fetch unless we received an empty page.", + "type": "boolean", + "default": false + }, "key": { "description": "API key or password (OS_PASSWORD).", "type": "string" @@ -16209,6 +20038,11 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "partialPageFetchThreshold": { + "description": "When paginating, fetch if the current page is within this percentage of the limit.", + "type": "integer", + "default": 0 + }, "region": { "description": "Region name - optional (OS_REGION_NAME).", "type": "string" @@ -16234,6 +20068,11 @@ const docTemplate = `{ "description": "Tenant ID - optional for v1 auth, this or 
tenant required otherwise (OS_TENANT_ID).", "type": "string" }, + "useSegmentsContainer": { + "description": "Choose destination for large object segments", + "type": "string", + "default": "unset" + }, "user": { "description": "User name to log in (OS_USERNAME).", "type": "string" @@ -16262,6 +20101,10 @@ const docTemplate = `{ "type": "string", "default": "epmfs" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "minFreeSpace": { "description": "Minimum viable free space for lfs/eplfs policies.", "type": "string", @@ -16285,10 +20128,19 @@ const docTemplate = `{ "description": "Your access token.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", "default": "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot" + }, + "private": { + "description": "Set to make uploaded files private", + "type": "boolean", + "default": false } } }, @@ -16303,6 +20155,10 @@ const docTemplate = `{ "description": "Command to run to get a bearer token.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string" @@ -16311,10 +20167,34 @@ const docTemplate = `{ "description": "Set HTTP headers for all transactions.", "type": "string" }, + "nextcloudChunkSize": { + "description": "Nextcloud upload chunk size.", + "type": "string", + "default": "10Mi" + }, + "owncloudExcludeMounts": { + "description": "Exclude ownCloud mounted storages", + "type": "boolean", + "default": false + }, + "owncloudExcludeShares": { + "description": "Exclude ownCloud shares", + "type": "boolean", + "default": false + }, + "pacerMinSleep": { + "description": "Minimum time to sleep between API calls.", + "type": "string", + "default": "10ms" + }, "pass": { "description": "Password.", "type": "string" }, + "unixSocket": { + "description": "Path to a unix domain socket to dial to, instead of opening a TCP connection directly", + "type": "string" + }, "url": { "description": "URL of http host to connect to.", "type": "string" @@ -16326,7 +20206,7 @@ const docTemplate = `{ "vendor": { "description": "Name of the WebDAV site/service/software you are using.", "type": "string", - "example": "nextcloud" + "example": "fastmail" } } }, @@ -16345,6 +20225,10 @@ const docTemplate = `{ "description": "OAuth Client Secret.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -16355,6 +20239,11 @@ const docTemplate = `{ "type": "boolean", "default": false }, + "spoofUa": { + "description": "Set the user agent to match an official version of the yandex disk client. 
May help with upload performance.", + "type": "boolean", + "default": true + }, "token": { "description": "OAuth Access Token as a JSON blob.", "type": "string" @@ -16380,6 +20269,10 @@ const docTemplate = `{ "description": "OAuth Client Secret.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -16422,7 +20315,7 @@ const docTemplate = `{ // SwaggerInfo holds exported Swagger Info so clients can modify it var SwaggerInfo = &swag.Spec{ Version: "beta", - Host: "localhost:9090", + Host: "", BasePath: "/api", Schemes: []string{}, Title: "Singularity API", diff --git a/docs/swagger/swagger.json b/docs/swagger/swagger.json index 2bf8a3a6..8b813e4b 100644 --- a/docs/swagger/swagger.json +++ b/docs/swagger/swagger.json @@ -19,7 +19,6 @@ }, "version": "beta" }, - "host": "localhost:9090", "basePath": "/api", "paths": { "/deal": { @@ -715,6 +714,70 @@ } } }, + "/preparation/{id}/piece/{piece_cid}": { + "delete": { + "description": "Deletes a piece (CAR) and its associated records. For data pieces, resets file ranges\nto allow re-packing. For DAG pieces, resets directory export flags for re-generation.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Piece" + ], + "summary": "Delete a piece from a preparation", + "operationId": "DeletePiece", + "parameters": [ + { + "type": "string", + "description": "Preparation ID or name", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Piece CID", + "name": "piece_cid", + "in": "path", + "required": true + }, + { + "description": "Delete options", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/dataprep.DeletePieceRequest" + } + } + ], + "responses": { + "204": { + "description": "No Content" + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, "/preparation/{id}/schedules": { "get": { "consumes": [ @@ -1893,52 +1956,6 @@ } } }, - "/storage/acd": { - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "Storage" - ], - "summary": "Create Acd storage", - "operationId": "CreateAcdStorage", - "parameters": [ - { - "description": "Request body", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/storage.createAcdStorageRequest" - } - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/model.Storage" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "$ref": "#/definitions/api.HTTPError" - } - }, - "500": { - "description": "Internal Server Error", - "schema": { - "$ref": "#/definitions/api.HTTPError" - } - } - } - } - }, "/storage/azureblob": { "post": { "consumes": [ @@ -3227,6 +3244,52 @@ } } }, + "/storage/oos/workload_identity_auth": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Oos storage with workload_identity_auth - use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI 
resources using OCI Identity and Access Management (IAM).", + "operationId": "CreateOosWorkload_identity_authStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createOosWorkload_identity_authStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, "/storage/opendrive": { "post": { "consumes": [ @@ -3825,7 +3888,7 @@ } } }, - "/storage/s3/huaweiobs": { + "/storage/s3/gcs": { "post": { "consumes": [ "application/json" @@ -3836,8 +3899,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with HuaweiOBS - Huawei Object Storage Service", - "operationId": "CreateS3HuaweiOBSStorage", + "summary": "Create S3 storage with GCS - Google Cloud Storage", + "operationId": "CreateS3GCSStorage", "parameters": [ { "description": "Request body", @@ -3845,7 +3908,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3HuaweiOBSStorageRequest" + "$ref": "#/definitions/storage.createS3GCSStorageRequest" } } ], @@ -3871,7 +3934,7 @@ } } }, - "/storage/s3/ibmcos": { + "/storage/s3/huaweiobs": { "post": { "consumes": [ "application/json" @@ -3882,8 +3945,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with IBMCOS - IBM COS S3", - "operationId": "CreateS3IBMCOSStorage", + "summary": "Create S3 storage with HuaweiOBS - Huawei Object Storage Service", + "operationId": "CreateS3HuaweiOBSStorage", "parameters": [ { "description": "Request body", @@ -3891,7 +3954,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IBMCOSStorageRequest" + "$ref": "#/definitions/storage.createS3HuaweiOBSStorageRequest" } } ], @@ -3917,7 +3980,7 @@ } } }, - "/storage/s3/idrive": { + "/storage/s3/ibmcos": { "post": { "consumes": [ "application/json" @@ -3928,8 +3991,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with IDrive - IDrive e2", - "operationId": "CreateS3IDriveStorage", + "summary": "Create S3 storage with IBMCOS - IBM COS S3", + "operationId": "CreateS3IBMCOSStorage", "parameters": [ { "description": "Request body", @@ -3937,7 +4000,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IDriveStorageRequest" + "$ref": "#/definitions/storage.createS3IBMCOSStorageRequest" } } ], @@ -3963,7 +4026,7 @@ } } }, - "/storage/s3/ionos": { + "/storage/s3/idrive": { "post": { "consumes": [ "application/json" @@ -3974,8 +4037,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with IONOS - IONOS Cloud", - "operationId": "CreateS3IONOSStorage", + "summary": "Create S3 storage with IDrive - IDrive e2", + "operationId": "CreateS3IDriveStorage", "parameters": [ { "description": "Request body", @@ -3983,7 +4046,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3IONOSStorageRequest" + "$ref": "#/definitions/storage.createS3IDriveStorageRequest" } } ], @@ -4009,7 +4072,7 @@ } } }, - "/storage/s3/liara": { + "/storage/s3/ionos": { "post": { "consumes": [ "application/json" @@ -4020,8 +4083,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Liara - Liara Object Storage", - "operationId": "CreateS3LiaraStorage", + "summary": 
"Create S3 storage with IONOS - IONOS Cloud", + "operationId": "CreateS3IONOSStorage", "parameters": [ { "description": "Request body", @@ -4029,7 +4092,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LiaraStorageRequest" + "$ref": "#/definitions/storage.createS3IONOSStorageRequest" } } ], @@ -4055,7 +4118,7 @@ } } }, - "/storage/s3/lyvecloud": { + "/storage/s3/leviia": { "post": { "consumes": [ "application/json" @@ -4066,8 +4129,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with LyveCloud - Seagate Lyve Cloud", - "operationId": "CreateS3LyveCloudStorage", + "summary": "Create S3 storage with Leviia - Leviia Object Storage", + "operationId": "CreateS3LeviiaStorage", "parameters": [ { "description": "Request body", @@ -4075,7 +4138,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3LyveCloudStorageRequest" + "$ref": "#/definitions/storage.createS3LeviiaStorageRequest" } } ], @@ -4101,7 +4164,7 @@ } } }, - "/storage/s3/minio": { + "/storage/s3/liara": { "post": { "consumes": [ "application/json" @@ -4112,8 +4175,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Minio - Minio Object Storage", - "operationId": "CreateS3MinioStorage", + "summary": "Create S3 storage with Liara - Liara Object Storage", + "operationId": "CreateS3LiaraStorage", "parameters": [ { "description": "Request body", @@ -4121,7 +4184,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3MinioStorageRequest" + "$ref": "#/definitions/storage.createS3LiaraStorageRequest" } } ], @@ -4147,7 +4210,7 @@ } } }, - "/storage/s3/netease": { + "/storage/s3/linode": { "post": { "consumes": [ "application/json" @@ -4158,8 +4221,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Netease - Netease Object Storage (NOS)", - "operationId": "CreateS3NeteaseStorage", + "summary": "Create S3 storage with Linode - Linode Object Storage", + "operationId": "CreateS3LinodeStorage", "parameters": [ { "description": "Request body", @@ -4167,7 +4230,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3NeteaseStorageRequest" + "$ref": "#/definitions/storage.createS3LinodeStorageRequest" } } ], @@ -4193,7 +4256,7 @@ } } }, - "/storage/s3/other": { + "/storage/s3/lyvecloud": { "post": { "consumes": [ "application/json" @@ -4204,8 +4267,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Other - Any other S3 compatible provider", - "operationId": "CreateS3OtherStorage", + "summary": "Create S3 storage with LyveCloud - Seagate Lyve Cloud", + "operationId": "CreateS3LyveCloudStorage", "parameters": [ { "description": "Request body", @@ -4213,7 +4276,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3OtherStorageRequest" + "$ref": "#/definitions/storage.createS3LyveCloudStorageRequest" } } ], @@ -4239,7 +4302,7 @@ } } }, - "/storage/s3/qiniu": { + "/storage/s3/magalu": { "post": { "consumes": [ "application/json" @@ -4250,8 +4313,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Qiniu - Qiniu Object Storage (Kodo)", - "operationId": "CreateS3QiniuStorage", + "summary": "Create S3 storage with Magalu - Magalu Object Storage", + "operationId": "CreateS3MagaluStorage", "parameters": [ { "description": "Request body", @@ -4259,7 +4322,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3QiniuStorageRequest" + "$ref": 
"#/definitions/storage.createS3MagaluStorageRequest" } } ], @@ -4285,7 +4348,7 @@ } } }, - "/storage/s3/rackcorp": { + "/storage/s3/minio": { "post": { "consumes": [ "application/json" @@ -4296,8 +4359,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with RackCorp - RackCorp Object Storage", - "operationId": "CreateS3RackCorpStorage", + "summary": "Create S3 storage with Minio - Minio Object Storage", + "operationId": "CreateS3MinioStorage", "parameters": [ { "description": "Request body", @@ -4305,7 +4368,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3RackCorpStorageRequest" + "$ref": "#/definitions/storage.createS3MinioStorageRequest" } } ], @@ -4331,7 +4394,7 @@ } } }, - "/storage/s3/scaleway": { + "/storage/s3/netease": { "post": { "consumes": [ "application/json" @@ -4342,8 +4405,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Scaleway - Scaleway Object Storage", - "operationId": "CreateS3ScalewayStorage", + "summary": "Create S3 storage with Netease - Netease Object Storage (NOS)", + "operationId": "CreateS3NeteaseStorage", "parameters": [ { "description": "Request body", @@ -4351,7 +4414,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3ScalewayStorageRequest" + "$ref": "#/definitions/storage.createS3NeteaseStorageRequest" } } ], @@ -4377,7 +4440,7 @@ } } }, - "/storage/s3/seaweedfs": { + "/storage/s3/other": { "post": { "consumes": [ "application/json" @@ -4388,8 +4451,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with SeaweedFS - SeaweedFS S3", - "operationId": "CreateS3SeaweedFSStorage", + "summary": "Create S3 storage with Other - Any other S3 compatible provider", + "operationId": "CreateS3OtherStorage", "parameters": [ { "description": "Request body", @@ -4397,7 +4460,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3SeaweedFSStorageRequest" + "$ref": "#/definitions/storage.createS3OtherStorageRequest" } } ], @@ -4423,7 +4486,7 @@ } } }, - "/storage/s3/stackpath": { + "/storage/s3/petabox": { "post": { "consumes": [ "application/json" @@ -4434,8 +4497,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with StackPath - StackPath Object Storage", - "operationId": "CreateS3StackPathStorage", + "summary": "Create S3 storage with Petabox - Petabox Object Storage", + "operationId": "CreateS3PetaboxStorage", "parameters": [ { "description": "Request body", @@ -4443,7 +4506,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3StackPathStorageRequest" + "$ref": "#/definitions/storage.createS3PetaboxStorageRequest" } } ], @@ -4469,7 +4532,7 @@ } } }, - "/storage/s3/storj": { + "/storage/s3/qiniu": { "post": { "consumes": [ "application/json" @@ -4480,8 +4543,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Storj - Storj (S3 Compatible Gateway)", - "operationId": "CreateS3StorjStorage", + "summary": "Create S3 storage with Qiniu - Qiniu Object Storage (Kodo)", + "operationId": "CreateS3QiniuStorage", "parameters": [ { "description": "Request body", @@ -4489,7 +4552,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3StorjStorageRequest" + "$ref": "#/definitions/storage.createS3QiniuStorageRequest" } } ], @@ -4515,7 +4578,7 @@ } } }, - "/storage/s3/tencentcos": { + "/storage/s3/rackcorp": { "post": { "consumes": [ "application/json" @@ -4526,8 +4589,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with TencentCOS - 
Tencent Cloud Object Storage (COS)", - "operationId": "CreateS3TencentCOSStorage", + "summary": "Create S3 storage with RackCorp - RackCorp Object Storage", + "operationId": "CreateS3RackCorpStorage", "parameters": [ { "description": "Request body", @@ -4535,7 +4598,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3TencentCOSStorageRequest" + "$ref": "#/definitions/storage.createS3RackCorpStorageRequest" } } ], @@ -4561,7 +4624,7 @@ } } }, - "/storage/s3/wasabi": { + "/storage/s3/rclone": { "post": { "consumes": [ "application/json" @@ -4572,8 +4635,8 @@ "tags": [ "Storage" ], - "summary": "Create S3 storage with Wasabi - Wasabi Object Storage", - "operationId": "CreateS3WasabiStorage", + "summary": "Create S3 storage with Rclone - Rclone S3 Server", + "operationId": "CreateS3RcloneStorage", "parameters": [ { "description": "Request body", @@ -4581,7 +4644,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createS3WasabiStorageRequest" + "$ref": "#/definitions/storage.createS3RcloneStorageRequest" } } ], @@ -4607,7 +4670,7 @@ } } }, - "/storage/seafile": { + "/storage/s3/scaleway": { "post": { "consumes": [ "application/json" @@ -4618,8 +4681,8 @@ "tags": [ "Storage" ], - "summary": "Create Seafile storage", - "operationId": "CreateSeafileStorage", + "summary": "Create S3 storage with Scaleway - Scaleway Object Storage", + "operationId": "CreateS3ScalewayStorage", "parameters": [ { "description": "Request body", @@ -4627,7 +4690,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSeafileStorageRequest" + "$ref": "#/definitions/storage.createS3ScalewayStorageRequest" } } ], @@ -4653,7 +4716,7 @@ } } }, - "/storage/sftp": { + "/storage/s3/seaweedfs": { "post": { "consumes": [ "application/json" @@ -4664,8 +4727,8 @@ "tags": [ "Storage" ], - "summary": "Create Sftp storage", - "operationId": "CreateSftpStorage", + "summary": "Create S3 storage with SeaweedFS - SeaweedFS S3", + "operationId": "CreateS3SeaweedFSStorage", "parameters": [ { "description": "Request body", @@ -4673,7 +4736,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSftpStorageRequest" + "$ref": "#/definitions/storage.createS3SeaweedFSStorageRequest" } } ], @@ -4699,7 +4762,7 @@ } } }, - "/storage/sharefile": { + "/storage/s3/stackpath": { "post": { "consumes": [ "application/json" @@ -4710,8 +4773,8 @@ "tags": [ "Storage" ], - "summary": "Create Sharefile storage", - "operationId": "CreateSharefileStorage", + "summary": "Create S3 storage with StackPath - StackPath Object Storage", + "operationId": "CreateS3StackPathStorage", "parameters": [ { "description": "Request body", @@ -4719,7 +4782,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSharefileStorageRequest" + "$ref": "#/definitions/storage.createS3StackPathStorageRequest" } } ], @@ -4745,7 +4808,7 @@ } } }, - "/storage/sia": { + "/storage/s3/storj": { "post": { "consumes": [ "application/json" @@ -4756,8 +4819,8 @@ "tags": [ "Storage" ], - "summary": "Create Sia storage", - "operationId": "CreateSiaStorage", + "summary": "Create S3 storage with Storj - Storj (S3 Compatible Gateway)", + "operationId": "CreateS3StorjStorage", "parameters": [ { "description": "Request body", @@ -4765,7 +4828,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSiaStorageRequest" + "$ref": "#/definitions/storage.createS3StorjStorageRequest" } } ], @@ -4791,7 +4854,7 @@ } } 
}, - "/storage/smb": { + "/storage/s3/synology": { "post": { "consumes": [ "application/json" @@ -4802,8 +4865,8 @@ "tags": [ "Storage" ], - "summary": "Create Smb storage", - "operationId": "CreateSmbStorage", + "summary": "Create S3 storage with Synology - Synology C2 Object Storage", + "operationId": "CreateS3SynologyStorage", "parameters": [ { "description": "Request body", @@ -4811,7 +4874,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSmbStorageRequest" + "$ref": "#/definitions/storage.createS3SynologyStorageRequest" } } ], @@ -4837,7 +4900,7 @@ } } }, - "/storage/storj/existing": { + "/storage/s3/tencentcos": { "post": { "consumes": [ "application/json" @@ -4848,8 +4911,8 @@ "tags": [ "Storage" ], - "summary": "Create Storj storage with existing - Use an existing access grant.", - "operationId": "CreateStorjExistingStorage", + "summary": "Create S3 storage with TencentCOS - Tencent Cloud Object Storage (COS)", + "operationId": "CreateS3TencentCOSStorage", "parameters": [ { "description": "Request body", @@ -4857,7 +4920,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createStorjExistingStorageRequest" + "$ref": "#/definitions/storage.createS3TencentCOSStorageRequest" } } ], @@ -4883,7 +4946,7 @@ } } }, - "/storage/storj/new": { + "/storage/s3/wasabi": { "post": { "consumes": [ "application/json" @@ -4894,8 +4957,8 @@ "tags": [ "Storage" ], - "summary": "Create Storj storage with new - Create a new access grant from satellite address, API key, and passphrase.", - "operationId": "CreateStorjNewStorage", + "summary": "Create S3 storage with Wasabi - Wasabi Object Storage", + "operationId": "CreateS3WasabiStorage", "parameters": [ { "description": "Request body", @@ -4903,7 +4966,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createStorjNewStorageRequest" + "$ref": "#/definitions/storage.createS3WasabiStorageRequest" } } ], @@ -4929,7 +4992,7 @@ } } }, - "/storage/sugarsync": { + "/storage/seafile": { "post": { "consumes": [ "application/json" @@ -4940,8 +5003,8 @@ "tags": [ "Storage" ], - "summary": "Create Sugarsync storage", - "operationId": "CreateSugarsyncStorage", + "summary": "Create Seafile storage", + "operationId": "CreateSeafileStorage", "parameters": [ { "description": "Request body", @@ -4949,7 +5012,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSugarsyncStorageRequest" + "$ref": "#/definitions/storage.createSeafileStorageRequest" } } ], @@ -4975,7 +5038,7 @@ } } }, - "/storage/swift": { + "/storage/sftp": { "post": { "consumes": [ "application/json" @@ -4986,8 +5049,8 @@ "tags": [ "Storage" ], - "summary": "Create Swift storage", - "operationId": "CreateSwiftStorage", + "summary": "Create Sftp storage", + "operationId": "CreateSftpStorage", "parameters": [ { "description": "Request body", @@ -4995,7 +5058,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createSwiftStorageRequest" + "$ref": "#/definitions/storage.createSftpStorageRequest" } } ], @@ -5021,7 +5084,7 @@ } } }, - "/storage/union": { + "/storage/sharefile": { "post": { "consumes": [ "application/json" @@ -5032,8 +5095,8 @@ "tags": [ "Storage" ], - "summary": "Create Union storage", - "operationId": "CreateUnionStorage", + "summary": "Create Sharefile storage", + "operationId": "CreateSharefileStorage", "parameters": [ { "description": "Request body", @@ -5041,7 +5104,7 @@ "in": "body", "required": true, "schema": { - "$ref": 
"#/definitions/storage.createUnionStorageRequest" + "$ref": "#/definitions/storage.createSharefileStorageRequest" } } ], @@ -5067,7 +5130,7 @@ } } }, - "/storage/uptobox": { + "/storage/sia": { "post": { "consumes": [ "application/json" @@ -5078,8 +5141,8 @@ "tags": [ "Storage" ], - "summary": "Create Uptobox storage", - "operationId": "CreateUptoboxStorage", + "summary": "Create Sia storage", + "operationId": "CreateSiaStorage", "parameters": [ { "description": "Request body", @@ -5087,7 +5150,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createUptoboxStorageRequest" + "$ref": "#/definitions/storage.createSiaStorageRequest" } } ], @@ -5113,7 +5176,7 @@ } } }, - "/storage/webdav": { + "/storage/smb": { "post": { "consumes": [ "application/json" @@ -5124,8 +5187,8 @@ "tags": [ "Storage" ], - "summary": "Create Webdav storage", - "operationId": "CreateWebdavStorage", + "summary": "Create Smb storage", + "operationId": "CreateSmbStorage", "parameters": [ { "description": "Request body", @@ -5133,7 +5196,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createWebdavStorageRequest" + "$ref": "#/definitions/storage.createSmbStorageRequest" } } ], @@ -5159,7 +5222,7 @@ } } }, - "/storage/yandex": { + "/storage/storj/existing": { "post": { "consumes": [ "application/json" @@ -5170,8 +5233,8 @@ "tags": [ "Storage" ], - "summary": "Create Yandex storage", - "operationId": "CreateYandexStorage", + "summary": "Create Storj storage with existing - Use an existing access grant.", + "operationId": "CreateStorjExistingStorage", "parameters": [ { "description": "Request body", @@ -5179,7 +5242,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createYandexStorageRequest" + "$ref": "#/definitions/storage.createStorjExistingStorageRequest" } } ], @@ -5205,7 +5268,7 @@ } } }, - "/storage/zoho": { + "/storage/storj/new": { "post": { "consumes": [ "application/json" @@ -5216,8 +5279,8 @@ "tags": [ "Storage" ], - "summary": "Create Zoho storage", - "operationId": "CreateZohoStorage", + "summary": "Create Storj storage with new - Create a new access grant from satellite address, API key, and passphrase.", + "operationId": "CreateStorjNewStorage", "parameters": [ { "description": "Request body", @@ -5225,7 +5288,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.createZohoStorageRequest" + "$ref": "#/definitions/storage.createStorjNewStorageRequest" } } ], @@ -5251,25 +5314,36 @@ } } }, - "/storage/{name}": { - "delete": { + "/storage/sugarsync": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], "tags": [ "Storage" ], - "summary": "Remove a storage", - "operationId": "RemoveStorage", + "summary": "Create Sugarsync storage", + "operationId": "CreateSugarsyncStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createSugarsyncStorageRequest" + } } ], "responses": { - "204": { - "description": "No Content" + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } }, "400": { "description": "Bad Request", @@ -5284,8 +5358,10 @@ } } } - }, - "patch": { + } + }, + "/storage/swift": { + "post": { "consumes": [ "application/json" ], @@ -5295,26 +5371,16 @@ "tags": [ "Storage" ], - "summary": 
"Update a storage connection", - "operationId": "UpdateStorage", + "summary": "Create Swift storage", + "operationId": "CreateSwiftStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true - }, - { - "description": "Configuration", - "name": "config", + "description": "Request body", + "name": "request", "in": "body", "required": true, "schema": { - "type": "object", - "additionalProperties": { - "type": "string" - } + "$ref": "#/definitions/storage.createSwiftStorageRequest" } } ], @@ -5340,8 +5406,8 @@ } } }, - "/storage/{name}/explore/{path}": { - "get": { + "/storage/union": { + "post": { "consumes": [ "application/json" ], @@ -5351,32 +5417,24 @@ "tags": [ "Storage" ], - "summary": "Explore directory entries in a storage system", - "operationId": "ExploreStorage", + "summary": "Create Union storage", + "operationId": "CreateUnionStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "Path in the storage system to explore", - "name": "path", - "in": "path", - "required": true + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createUnionStorageRequest" + } } ], "responses": { "200": { "description": "OK", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/storage.DirEntry" - } + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5394,8 +5452,8 @@ } } }, - "/storage/{name}/rename": { - "patch": { + "/storage/uptobox": { + "post": { "consumes": [ "application/json" ], @@ -5405,23 +5463,16 @@ "tags": [ "Storage" ], - "summary": "Rename a storage connection", - "operationId": "RenameStorage", + "summary": "Create Uptobox storage", + "operationId": "CreateUptoboxStorage", "parameters": [ { - "type": "string", - "description": "Storage ID or name", - "name": "name", - "in": "path", - "required": true - }, - { - "description": "New storage name", + "description": "Request body", "name": "request", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/storage.RenameRequest" + "$ref": "#/definitions/storage.createUptoboxStorageRequest" } } ], @@ -5447,24 +5498,35 @@ } } }, - "/wallet": { - "get": { + "/storage/webdav": { + "post": { + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "Wallet" + "Storage" + ], + "summary": "Create Webdav storage", + "operationId": "CreateWebdavStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createWebdavStorageRequest" + } + } ], - "summary": "List all imported wallets", - "operationId": "ListWallets", "responses": { "200": { "description": "OK", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/model.Wallet" - } + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5480,7 +5542,9 @@ } } } - }, + } + }, + "/storage/yandex": { "post": { "consumes": [ "application/json" @@ -5489,10 +5553,10 @@ "application/json" ], "tags": [ - "Wallet" + "Storage" ], - "summary": "Import a private key", - "operationId": "ImportWallet", + "summary": "Create Yandex storage", + "operationId": "CreateYandexStorage", "parameters": [ { "description": "Request body", @@ -5500,7 +5564,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/wallet.ImportRequest" + 
"$ref": "#/definitions/storage.createYandexStorageRequest" } } ], @@ -5508,7 +5572,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/model.Wallet" + "$ref": "#/definitions/model.Storage" } }, "400": { @@ -5526,18 +5590,64 @@ } } }, - "/wallet/{address}": { + "/storage/zoho": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Create Zoho storage", + "operationId": "CreateZohoStorage", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.createZohoStorageRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/storage/{name}": { "delete": { "tags": [ - "Wallet" + "Storage" ], - "summary": "Remove a wallet", - "operationId": "RemoveWallet", + "summary": "Remove a storage", + "operationId": "RemoveStorage", "parameters": [ { "type": "string", - "description": "Address", - "name": "address", + "description": "Storage ID or name", + "name": "name", "in": "path", "required": true } @@ -5559,57 +5669,332 @@ } } } - } - } - }, - "definitions": { - "admin.SetIdentityRequest": { - "type": "object", - "properties": { - "identity": { - "type": "string" + }, + "patch": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Update a storage connection", + "operationId": "UpdateStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "description": "Configuration", + "name": "config", + "in": "body", + "required": true, + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } } } }, - "api.HTTPError": { - "type": "object", - "properties": { - "err": { - "type": "string" + "/storage/{name}/explore/{path}": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Explore directory entries in a storage system", + "operationId": "ExploreStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Path in the storage system to explore", + "name": "path", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/storage.DirEntry" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } } } }, - "dataprep.AddPieceRequest": { - "type": "object", - "required": [ - 
"pieceCid", - "pieceSize" - ], - "properties": { - "fileSize": { - "description": "File size of the CAR file, this is required for boost online deal", - "type": "integer" - }, - "pieceCid": { - "description": "CID of the piece", - "type": "string" - }, - "pieceSize": { - "description": "Size of the piece", - "type": "string" - }, - "rootCid": { - "description": "Root CID of the CAR file, used to populate the label field of storage deal", - "type": "string" + "/storage/{name}/rename": { + "patch": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Storage" + ], + "summary": "Rename a storage connection", + "operationId": "RenameStorage", + "parameters": [ + { + "type": "string", + "description": "Storage ID or name", + "name": "name", + "in": "path", + "required": true + }, + { + "description": "New storage name", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/storage.RenameRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Storage" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } } } }, - "dataprep.CreateRequest": { - "type": "object", - "required": [ - "name" - ], - "properties": { + "/wallet": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "Wallet" + ], + "summary": "List all imported wallets", + "operationId": "ListWallets", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Wallet" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + }, + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Wallet" + ], + "summary": "Import a private key", + "operationId": "ImportWallet", + "parameters": [ + { + "description": "Request body", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/wallet.ImportRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/model.Wallet" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + }, + "/wallet/{address}": { + "delete": { + "tags": [ + "Wallet" + ], + "summary": "Remove a wallet", + "operationId": "RemoveWallet", + "parameters": [ + { + "type": "string", + "description": "Address", + "name": "address", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.HTTPError" + } + } + } + } + } + }, + "definitions": { + "admin.SetIdentityRequest": { + "type": "object", + "properties": { + "identity": { + "type": "string" + } + } + }, + "api.HTTPError": { + "type": "object", + "properties": { + "err": { + "type": "string" + } + } + }, + 
"dataprep.AddPieceRequest": { + "type": "object", + "required": [ + "pieceCid", + "pieceSize" + ], + "properties": { + "fileSize": { + "description": "File size of the CAR file, this is required for boost online deal", + "type": "integer" + }, + "pieceCid": { + "description": "CID of the piece", + "type": "string" + }, + "pieceSize": { + "description": "Size of the piece", + "type": "string" + }, + "rootCid": { + "description": "Root CID of the CAR file, used to populate the label field of storage deal", + "type": "string" + } + } + }, + "dataprep.CreateRequest": { + "type": "object", + "required": [ + "name" + ], + "properties": { "deleteAfterExport": { "description": "Whether to delete the source files after export", "type": "boolean", @@ -5659,6 +6044,19 @@ } } }, + "dataprep.DeletePieceRequest": { + "type": "object", + "properties": { + "deleteCar": { + "description": "Delete the physical CAR file from storage (default: true)", + "type": "boolean" + }, + "force": { + "description": "Delete even if deals reference this piece", + "type": "boolean" + } + } + }, "dataprep.DirEntry": { "type": "object", "properties": { @@ -5943,6 +6341,10 @@ "jobId": { "type": "integer" }, + "minPieceSizePadding": { + "description": "MinPieceSizePadding tracks virtual padding for inline mode only. Inline: stores padding amount, PieceReader serves zeros virtually. Non-inline: always 0, literal zeros are written to CAR file for Curio TreeD compatibility.", + "type": "integer" + }, "numOfFiles": { "type": "integer" }, @@ -5957,7 +6359,7 @@ "type": "string" }, "preparationId": { - "description": "Association", + "description": "Association - SET NULL for fast prep deletion, async cleanup", "type": "integer" }, "rootCid": { @@ -6160,7 +6562,7 @@ "type": "object", "properties": { "attachmentId": { - "description": "Associations", + "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", "type": "integer" }, "cid": { @@ -6245,7 +6647,7 @@ "$ref": "#/definitions/model.JobType" }, "workerId": { - "description": "Associations", + "description": "Associations - AttachmentID SET NULL for fast prep deletion, async cleanup", "type": "string" } } @@ -6760,55 +7162,11 @@ } } }, - "storage.acdConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", - "type": "string" - }, - "checkpoint": { - "description": "Checkpoint for internal polling (debug).", - "type": "string" - }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" - }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" - }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "templinkThreshold": { - "description": "Files \u003e= this size will be downloaded via their tempLink.", - "type": "string", - "default": "9Gi" - }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" - }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" - }, - "uploadWaitPerGb": { - "description": "Additional time per GiB to wait after a failed complete upload to see if it appears.", - "type": "string", - "default": "3m0s" - } - } - }, "storage.azureblobConfig": { "type": "object", "properties": { "accessTier": { - "description": "Access tier of blob: hot, cool or archive.", + "description": "Access tier of blob: hot, cool, cold or archive.", "type": "string" }, "account": { @@ -6846,6 +7204,20 @@ "type": "boolean", "default": 
false }, + "deleteSnapshots": { + "description": "Set to specify how to deal with snapshots on blob deletion.", + "type": "string", + "example": "" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -6875,12 +7247,12 @@ "default": 5000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -6969,13 +7341,17 @@ "type": "string", "default": "4Gi" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "disableChecksum": { "description": "Disable checksums for large (\u003e upload cutoff) files.", "type": "boolean", "default": false }, "downloadAuthDuration": { - "description": "Time before the authorization token will expire in s or suffix ms|s|m|h|d.", + "description": "Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.", "type": "string", "default": "1w" }, @@ -7001,13 +7377,18 @@ "description": "Application Key.", "type": "string" }, + "lifecycle": { + "description": "Set the number of days deleted files should be kept when creating a bucket.", + "type": "integer", + "default": 0 + }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -7015,6 +7396,11 @@ "description": "A flag string for X-Bz-Test-Mode header for debugging.", "type": "string" }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 4 + }, "uploadCutoff": { "description": "Cutoff for switching to chunked upload.", "type": "string", @@ -7065,11 +7451,19 @@ "type": "integer", "default": 100 }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", "default": "Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot" }, + "impersonate": { + "description": "Impersonate this user ID when using a service account.", + "type": "string" + }, "listChunk": { "description": "Size of listing chunk 1-1000.", "type": "integer", @@ -7099,7 +7493,7 @@ } } }, - "storage.createAcdStorageRequest": { + "storage.createAzureblobStorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7114,7 +7508,7 @@ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.acdConfig" + "$ref": "#/definitions/storage.azureblobConfig" } ] }, @@ -7129,7 +7523,7 @@ } } }, - "storage.createAzureblobStorageRequest": { + "storage.createB2StorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7144,7 +7538,7 @@ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.azureblobConfig" + "$ref": "#/definitions/storage.b2Config" } ] }, @@ -7159,7 +7553,7 @@ } } }, - "storage.createB2StorageRequest": { + "storage.createBoxStorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7174,7 +7568,7 @@ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.b2Config" + "$ref": "#/definitions/storage.boxConfig" } ] }, @@ -7189,7 +7583,7 @@ } } }, - "storage.createBoxStorageRequest": { + "storage.createDriveStorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7204,7 +7598,7 @@ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.boxConfig" + "$ref": "#/definitions/storage.driveConfig" } ] }, @@ -7219,7 +7613,7 @@ } } }, - "storage.createDriveStorageRequest": { + "storage.createDropboxStorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7234,7 +7628,7 @@ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.driveConfig" + "$ref": "#/definitions/storage.dropboxConfig" } ] }, @@ -7249,7 +7643,7 @@ } } }, - "storage.createDropboxStorageRequest": { + "storage.createFichierStorageRequest": { "type": "object", "properties": { "clientConfig": { @@ -7264,37 +7658,7 @@ "description": "config for the storage", "allOf": [ { - "$ref": "#/definitions/storage.dropboxConfig" - } - ] - }, - "name": { - "description": "Name of the storage, must be unique", - "type": "string", - "example": "my-storage" - }, - "path": { - "description": "Path of the storage", - "type": "string" - } - } - }, - "storage.createFichierStorageRequest": { - "type": "object", - "properties": { - "clientConfig": { - "description": "config for underlying HTTP client", - "allOf": [ - { - "$ref": "#/definitions/model.ClientConfig" - } - ] - }, - "config": { - "description": "config for the storage", - "allOf": [ - { - "$ref": "#/definitions/storage.fichierConfig" + "$ref": "#/definitions/storage.fichierConfig" } ] }, @@ -7969,6 +8333,36 @@ } } }, + 
"storage.createOosWorkload_identity_authStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.oosWorkload_identity_authConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createOpendriveStorageRequest": { "type": "object", "properties": { @@ -8359,6 +8753,36 @@ } } }, + "storage.createS3GCSStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3GCSConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3HuaweiOBSStorageRequest": { "type": "object", "properties": { @@ -8479,6 +8903,36 @@ } } }, + "storage.createS3LeviiaStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LeviiaConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3LiaraStorageRequest": { "type": "object", "properties": { @@ -8509,6 +8963,36 @@ } } }, + "storage.createS3LinodeStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3LinodeConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3LyveCloudStorageRequest": { "type": "object", "properties": { @@ -8539,6 +9023,36 @@ } } }, + "storage.createS3MagaluStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3MagaluConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3MinioStorageRequest": { "type": "object", "properties": { @@ -8629,6 +9143,36 @@ } } }, + "storage.createS3PetaboxStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying 
HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3PetaboxConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3QiniuStorageRequest": { "type": "object", "properties": { @@ -8689,6 +9233,36 @@ } } }, + "storage.createS3RcloneStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3RcloneConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3ScalewayStorageRequest": { "type": "object", "properties": { @@ -8809,6 +9383,36 @@ } } }, + "storage.createS3SynologyStorageRequest": { + "type": "object", + "properties": { + "clientConfig": { + "description": "config for underlying HTTP client", + "allOf": [ + { + "$ref": "#/definitions/model.ClientConfig" + } + ] + }, + "config": { + "description": "config for the storage", + "allOf": [ + { + "$ref": "#/definitions/storage.s3SynologyConfig" + } + ] + }, + "name": { + "description": "Name of the storage, must be unique", + "type": "string", + "example": "my-storage" + }, + "path": { + "description": "Path of the storage", + "type": "string" + } + } + }, "storage.createS3TencentCOSStorageRequest": { "type": "object", "properties": { @@ -9312,222 +9916,2088 @@ "type": "boolean", "default": false }, - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "chunkSize": { + "description": "Upload chunk size.", + "type": "string", + "default": "8Mi" + }, + "clientId": { + "description": "Google Application Client Id", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "copyShortcutContent": { + "description": "Server side copy contents of shortcuts instead of the shortcut.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableHttp2": { + "description": "Disable drive using http2.", + "type": "boolean", + "default": true + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "InvalidUtf8" + }, + "envAuth": { + "description": "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "exportFormats": { + "description": "Comma separated list of preferred formats for downloading Google docs.", + "type": "string", + "default": "docx,xlsx,pptx,svg" + }, + "fastListBugFix": { + "description": "Work around a bug in Google Drive listing.", + "type": "boolean", + "default": true + }, + "formats": { + "description": "Deprecated: See export_formats.", + "type": "string" + }, + "impersonate": { + "description": "Impersonate this user when using a service account.", + "type": "string" + }, + "importFormats": { + 
"description": "Comma separated list of preferred formats for uploading Google docs.", + "type": "string" + }, + "keepRevisionForever": { + "description": "Keep new head revision of each file forever.", + "type": "boolean", + "default": false + }, + "listChunk": { + "description": "Size of listing chunk 100-1000, 0 to disable.", + "type": "integer", + "default": 1000 + }, + "metadataLabels": { + "description": "Control whether labels should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "metadataOwner": { + "description": "Control whether owner should be read or written in metadata.", + "type": "string", + "default": "read", + "example": "off" + }, + "metadataPermissions": { + "description": "Control whether permissions should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "pacerBurst": { + "description": "Number of API calls to allow without sleeping.", + "type": "integer", + "default": 100 + }, + "pacerMinSleep": { + "description": "Minimum time to sleep between API calls.", + "type": "string", + "default": "100ms" + }, + "resourceKey": { + "description": "Resource key for accessing a link-shared file.", + "type": "string" + }, + "rootFolderId": { + "description": "ID of the root folder.", + "type": "string" + }, + "scope": { + "description": "Comma separated list of scopes that rclone should use when requesting access from drive.", + "type": "string", + "example": "drive" + }, + "serverSideAcrossConfigs": { + "description": "Deprecated: use --server-side-across-configs instead.", + "type": "boolean", + "default": false + }, + "serviceAccountCredentials": { + "description": "Service Account Credentials JSON blob.", + "type": "string" + }, + "serviceAccountFile": { + "description": "Service Account Credentials JSON file path.", + "type": "string" + }, + "sharedWithMe": { + "description": "Only show files that are shared with me.", + "type": "boolean", + "default": false + }, + "showAllGdocs": { + "description": "Show all Google Docs including non-exportable ones in listings.", + "type": "boolean", + "default": false + }, + "sizeAsQuota": { + "description": "Show sizes as storage quota usage, not actual size.", + "type": "boolean", + "default": false + }, + "skipChecksumGphotos": { + "description": "Skip checksums on Google photos and videos only.", + "type": "boolean", + "default": false + }, + "skipDanglingShortcuts": { + "description": "If set skip dangling shortcut files.", + "type": "boolean", + "default": false + }, + "skipGdocs": { + "description": "Skip google documents in all listings.", + "type": "boolean", + "default": false + }, + "skipShortcuts": { + "description": "If set skip shortcut files.", + "type": "boolean", + "default": false + }, + "starredOnly": { + "description": "Only show files that are starred.", + "type": "boolean", + "default": false + }, + "stopOnDownloadLimit": { + "description": "Make download limit errors be fatal.", + "type": "boolean", + "default": false + }, + "stopOnUploadLimit": { + "description": "Make upload limit errors be fatal.", + "type": "boolean", + "default": false + }, + "teamDrive": { + "description": "ID of the Shared Drive (Team Drive).", + "type": "string" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "trashedOnly": { + "description": "Only show files that are in the trash.", + "type": "boolean", + 
"default": false + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "8Mi" + }, + "useCreatedDate": { + "description": "Use file created date instead of modified date.", + "type": "boolean", + "default": false + }, + "useSharedDate": { + "description": "Use date file was shared instead of modified date.", + "type": "boolean", + "default": false + }, + "useTrash": { + "description": "Send files to the trash instead of deleting permanently.", + "type": "boolean", + "default": true + }, + "v2DownloadMinSize": { + "description": "If Object's are greater, use drive v2 API to download.", + "type": "string", + "default": "off" + } + } + }, + "storage.dropboxConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "batchCommitTimeout": { + "description": "Max time to wait for a batch to finish committing", + "type": "string", + "default": "10m0s" + }, + "batchMode": { + "description": "Upload file batching sync|async|off.", + "type": "string", + "default": "sync" + }, + "batchSize": { + "description": "Max number of files in upload batch.", + "type": "integer", + "default": 0 + }, + "batchTimeout": { + "description": "Max time to allow an idle upload batch before uploading.", + "type": "string", + "default": "0s" + }, + "chunkSize": { + "description": "Upload chunk size (\u003c 150Mi).", + "type": "string", + "default": "48Mi" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot" + }, + "impersonate": { + "description": "Impersonate this user when using a business account.", + "type": "string" + }, + "pacerMinSleep": { + "description": "Minimum time to sleep between API calls.", + "type": "string", + "default": "10ms" + }, + "rootNamespace": { + "description": "Specify a different Dropbox namespace ID to use as the root for all paths.", + "type": "string" + }, + "sharedFiles": { + "description": "Instructs rclone to work on individual shared files.", + "type": "boolean", + "default": false + }, + "sharedFolders": { + "description": "Instructs rclone to work on shared folders.", + "type": "boolean", + "default": false + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + } + } + }, + "storage.fichierConfig": { + "type": "object", + "properties": { + "apiKey": { + "description": "Your API Key, get it from https://1fichier.com/console/params.pl.", + "type": "string" + }, + "cdn": { + "description": "Set if you wish to use CDN download links.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot" + }, + "filePassword": { + "description": "If you want to download a shared file that is password protected, add this parameter.", + "type": "string" + }, + "folderPassword": { + "description": "If you want to list the files in a 
shared folder that is password protected, add this parameter.", + "type": "string" + }, + "sharedFolder": { + "description": "If you want to download a shared folder, add this parameter.", + "type": "string" + } + } + }, + "storage.filefabricConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Del,Ctl,InvalidUtf8,Dot" + }, + "permanentToken": { + "description": "Permanent Authentication Token.", + "type": "string" + }, + "rootFolderId": { + "description": "ID of the root folder.", + "type": "string" + }, + "token": { + "description": "Session Token.", + "type": "string" + }, + "tokenExpiry": { + "description": "Token expiry time.", + "type": "string" + }, + "url": { + "description": "URL of the Enterprise File Fabric to connect to.", + "type": "string", + "example": "https://storagemadeeasy.com" + }, + "version": { + "description": "Version read from the file fabric.", + "type": "string" + } + } + }, + "storage.ftpConfig": { + "type": "object", + "properties": { + "askPassword": { + "description": "Allow asking for FTP password when needed.", + "type": "boolean", + "default": false + }, + "closeTimeout": { + "description": "Maximum time to wait for a response to close.", + "type": "string", + "default": "1m0s" + }, + "concurrency": { + "description": "Maximum number of FTP simultaneous connections, 0 for unlimited.", + "type": "integer", + "default": 0 + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableEpsv": { + "description": "Disable using EPSV even if server advertises support.", + "type": "boolean", + "default": false + }, + "disableMlsd": { + "description": "Disable using MLSD even if server advertises support.", + "type": "boolean", + "default": false + }, + "disableTls13": { + "description": "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", + "type": "boolean", + "default": false + }, + "disableUtf8": { + "description": "Disable using UTF-8 even if server advertises support.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Del,Ctl,RightSpace,Dot", + "example": "Asterisk,Ctl,Dot,Slash" + }, + "explicitTls": { + "description": "Use Explicit FTPS (FTP over TLS).", + "type": "boolean", + "default": false + }, + "forceListHidden": { + "description": "Use LIST -a to force listing of hidden files and folders. 
This will disable the use of MLSD.", + "type": "boolean", + "default": false + }, + "host": { + "description": "FTP host to connect to.", + "type": "string" + }, + "idleTimeout": { + "description": "Max time before closing idle connections.", + "type": "string", + "default": "1m0s" + }, + "noCheckCertificate": { + "description": "Do not verify the TLS certificate of the server.", + "type": "boolean", + "default": false + }, + "pass": { + "description": "FTP password.", + "type": "string" + }, + "port": { + "description": "FTP port number.", + "type": "integer", + "default": 21 + }, + "shutTimeout": { + "description": "Maximum time to wait for data connection closing status.", + "type": "string", + "default": "1m0s" + }, + "socksProxy": { + "description": "Socks 5 proxy host.", + "type": "string" + }, + "tls": { + "description": "Use Implicit FTPS (FTP over TLS).", + "type": "boolean", + "default": false + }, + "tlsCacheSize": { + "description": "Size of TLS session cache for all control and data connections.", + "type": "integer", + "default": 32 + }, + "user": { + "description": "FTP username.", + "type": "string", + "default": "$USER" + }, + "writingMdtm": { + "description": "Use MDTM to set modification time (VsFtpd quirk)", + "type": "boolean", + "default": false + } + } + }, + "storage.gcsConfig": { + "type": "object", + "properties": { + "anonymous": { + "description": "Access public buckets and objects without credentials.", + "type": "boolean", + "default": false + }, + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "bucketAcl": { + "description": "Access Control List for new buckets.", + "type": "string", + "example": "authenticatedRead" + }, + "bucketPolicyOnly": { + "description": "Access checks should use bucket-level IAM policies.", + "type": "boolean", + "default": false + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,CrLf,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for the service.", + "type": "string" + }, + "envAuth": { + "description": "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "location": { + "description": "Location for the newly created buckets.", + "type": "string", + "example": "" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "objectAcl": { + "description": "Access Control List for new objects.", + "type": "string", + "example": "authenticatedRead" + }, + "projectNumber": { + "description": "Project number.", + "type": "string" + }, + "serviceAccountCredentials": { + "description": "Service Account Credentials JSON blob.", + "type": "string" + }, + "serviceAccountFile": { + "description": "Service Account Credentials JSON file path.", + "type": "string" + }, + "storageClass": { + 
"description": "The storage class to use when storing objects in Google Cloud Storage.", + "type": "string", + "example": "" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "userProject": { + "description": "User project.", + "type": "string" + } + } + }, + "storage.gphotosConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "batchCommitTimeout": { + "description": "Max time to wait for a batch to finish committing", + "type": "string", + "default": "10m0s" + }, + "batchMode": { + "description": "Upload file batching sync|async|off.", + "type": "string", + "default": "sync" + }, + "batchSize": { + "description": "Max number of files in upload batch.", + "type": "integer", + "default": 0 + }, + "batchTimeout": { + "description": "Max time to allow an idle upload batch before uploading.", + "type": "string", + "default": "0s" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,CrLf,InvalidUtf8,Dot" + }, + "includeArchived": { + "description": "Also view and download archived media.", + "type": "boolean", + "default": false + }, + "readOnly": { + "description": "Set to make the Google Photos backend read only.", + "type": "boolean", + "default": false + }, + "readSize": { + "description": "Set to read the size of media items.", + "type": "boolean", + "default": false + }, + "startYear": { + "description": "Year limits the photos to be downloaded to those which are uploaded after the given year.", + "type": "integer", + "default": 2000 + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + } + } + }, + "storage.hdfsConfig": { + "type": "object", + "properties": { + "dataTransferProtection": { + "description": "Kerberos data transfer protection: authentication|integrity|privacy.", + "type": "string", + "example": "privacy" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Colon,Del,Ctl,InvalidUtf8,Dot" + }, + "namenode": { + "description": "Hadoop name nodes and ports.", + "type": "string" + }, + "servicePrincipalName": { + "description": "Kerberos service principal name for the namenode.", + "type": "string" + }, + "username": { + "description": "Hadoop user name.", + "type": "string", + "example": "root" + } + } + }, + "storage.hidriveConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "chunkSize": { + "description": "Chunksize for chunked uploads.", + "type": "string", + "default": "48Mi" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableFetchingMemberCount": { + "description": "Do not fetch number of objects in directories unless it is 
absolutely necessary.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Dot" + }, + "endpoint": { + "description": "Endpoint for the service.", + "type": "string", + "default": "https://api.hidrive.strato.com/2.1" + }, + "rootPrefix": { + "description": "The root/parent folder for all paths.", + "type": "string", + "default": "/", + "example": "/" + }, + "scopeAccess": { + "description": "Access permissions that rclone should use when requesting access from HiDrive.", + "type": "string", + "default": "rw", + "example": "rw" + }, + "scopeRole": { + "description": "User-level that rclone should use when requesting access from HiDrive.", + "type": "string", + "default": "user", + "example": "user" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "uploadConcurrency": { + "description": "Concurrency for chunked uploads.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff/Threshold for chunked uploads.", + "type": "string", + "default": "96Mi" + } + } + }, + "storage.httpConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "headers": { + "description": "Set HTTP headers for all transactions.", + "type": "string" + }, + "noEscape": { + "description": "Do not escape URL metacharacters in path names.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "Don't use HEAD requests.", + "type": "boolean", + "default": false + }, + "noSlash": { + "description": "Set this if the site doesn't end directories with /.", + "type": "boolean", + "default": false + }, + "url": { + "description": "URL of HTTP host to connect to.", + "type": "string" + } + } + }, + "storage.internetarchiveConfig": { + "type": "object", + "properties": { + "accessKeyId": { + "description": "IAS3 Access Key.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't ask the server to test against MD5 checksum calculated by rclone.", + "type": "boolean", + "default": true + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "IAS3 Endpoint.", + "type": "string", + "default": "https://s3.us.archive.org" + }, + "frontEndpoint": { + "description": "Host of InternetArchive Frontend.", + "type": "string", + "default": "https://archive.org" + }, + "secretAccessKey": { + "description": "IAS3 Secret Key (password).", + "type": "string" + }, + "waitArchive": { + "description": "Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish.", + "type": "string", + "default": "0s" + } + } + }, + "storage.jottacloudConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": 
"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot" + }, + "hardDelete": { + "description": "Delete files permanently rather than putting them into the trash.", + "type": "boolean", + "default": false + }, + "md5MemoryLimit": { + "description": "Files bigger than this will be cached on disk to calculate the MD5 if required.", + "type": "string", + "default": "10Mi" + }, + "noVersions": { + "description": "Avoid server side versioning by deleting files and recreating files instead of overwriting them.", + "type": "boolean", + "default": false + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "trashedOnly": { + "description": "Only show files that are in the trash.", + "type": "boolean", + "default": false + }, + "uploadResumeLimit": { + "description": "Files bigger than this can be resumed if the upload fail's.", + "type": "string", + "default": "10Mi" + } + } + }, + "storage.koofrDigistorageConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + }, + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" + }, + "password": { + "description": "Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password.", + "type": "string" + }, + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true + }, + "user": { + "description": "Your user name.", + "type": "string" + } + } + }, + "storage.koofrKoofrConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + }, + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" + }, + "password": { + "description": "Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password.", + "type": "string" + }, + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true + }, + "user": { + "description": "Your user name.", + "type": "string" + } + } + }, + "storage.koofrOtherConfig": { + "type": "object", + "properties": { + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "The Koofr API endpoint to use.", + "type": "string" + }, + "mountid": { + "description": "Mount ID of the mount to use.", + "type": "string" + }, + "password": { + "description": "Your password for rclone (generate one at your service's settings page).", + "type": "string" + }, + "setmtime": { + "description": "Does the backend support setting modification time.", + "type": "boolean", + "default": true + }, + "user": { + "description": "Your user name.", + "type": "string" + } + } + }, + "storage.localConfig": { + "type": "object", + "properties": { + "caseInsensitive": { + "description": "Force the filesystem to report itself as case 
insensitive.", + "type": "boolean", + "default": false + }, + "caseSensitive": { + "description": "Force the filesystem to report itself as case sensitive.", + "type": "boolean", + "default": false + }, + "copyLinks": { + "description": "Follow symlinks and copy the pointed to item.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,Dot" + }, + "links": { + "description": "Translate symlinks to/from regular files with a '.rclonelink' extension.", + "type": "boolean", + "default": false + }, + "noCheckUpdated": { + "description": "Don't check to see if the files change during upload.", + "type": "boolean", + "default": false + }, + "noClone": { + "description": "Disable reflink cloning for server-side copies.", + "type": "boolean", + "default": false + }, + "noPreallocate": { + "description": "Disable preallocation of disk space for transferred files.", + "type": "boolean", + "default": false + }, + "noSetModtime": { + "description": "Disable setting modtime.", + "type": "boolean", + "default": false + }, + "noSparse": { + "description": "Disable sparse files for multi-thread downloads.", + "type": "boolean", + "default": false + }, + "nounc": { + "description": "Disable UNC (long path names) conversion on Windows.", + "type": "boolean", + "default": false, + "example": true + }, + "oneFileSystem": { + "description": "Don't cross filesystem boundaries (unix/macOS only).", + "type": "boolean", + "default": false + }, + "skipLinks": { + "description": "Don't warn about skipped symlinks.", + "type": "boolean", + "default": false + }, + "timeType": { + "description": "Set what kind of time is returned.", + "type": "string", + "default": "mtime", + "example": "mtime" + }, + "unicodeNormalization": { + "description": "Apply unicode NFC normalization to paths and filenames.", + "type": "boolean", + "default": false + }, + "zeroSizeLinks": { + "description": "Assume the Stat size of links is zero (and read them instead) (deprecated).", + "type": "boolean", + "default": false + } + } + }, + "storage.mailruConfig": { + "type": "object", + "properties": { + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "checkHash": { + "description": "What should copy do if file checksum is mismatched or invalid.", + "type": "boolean", + "default": true, + "example": true + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot" + }, + "pass": { + "description": "Password.", + "type": "string" + }, + "quirks": { + "description": "Comma separated list of internal maintenance flags.", + "type": "string" + }, + "speedupEnable": { + "description": "Skip full upload if there is another file with same data hash.", + "type": "boolean", + "default": true, + "example": true + }, + "speedupFilePatterns": { + "description": "Comma separated list of file name patterns eligible for speedup (put by hash).", + "type": "string", + "default": "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf", + "example": "" + }, + "speedupMaxDisk": { + 
"description": "This option allows you to disable speedup (put by hash) for large files.", + "type": "string", + "default": "3Gi", + "example": "0" + }, + "speedupMaxMemory": { + "description": "Files larger than the size given below will always be hashed on disk.", + "type": "string", + "default": "32Mi", + "example": "0" + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, + "user": { + "description": "User name (usually email).", + "type": "string" + }, + "userAgent": { + "description": "HTTP user agent used internally by client.", + "type": "string" + } + } + }, + "storage.megaConfig": { + "type": "object", + "properties": { + "debug": { + "description": "Output more debug from Mega.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "hardDelete": { + "description": "Delete files permanently rather than putting them into the trash.", + "type": "boolean", + "default": false + }, + "pass": { + "description": "Password.", + "type": "string" + }, + "useHttps": { + "description": "Use HTTPS for transfers.", + "type": "boolean", + "default": false + }, + "user": { + "description": "User name.", + "type": "string" + } + } + }, + "storage.netstorageConfig": { + "type": "object", + "properties": { + "account": { + "description": "Set the NetStorage account name", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "host": { + "description": "Domain+path of NetStorage host to connect to.", + "type": "string" + }, + "protocol": { + "description": "Select between HTTP or HTTPS protocol.", + "type": "string", + "default": "https", + "example": "http" + }, + "secret": { + "description": "Set the NetStorage account secret/G2O key for authentication.", + "type": "string" + } + } + }, + "storage.onedriveConfig": { + "type": "object", + "properties": { + "accessScopes": { + "description": "Set scopes to be requested by rclone.", + "type": "string", + "default": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access", + "example": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" + }, + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "avOverride": { + "description": "Allows download of files the server thinks has a virus.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to upload files with - must be multiple of 320k (327,680 bytes).", + "type": "string", + "default": "10Mi" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "delta": { + "description": "If set rclone will use delta listing to implement recursive listings.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableSitePermission": { + "description": "Disable the request for Sites.Read.All permission.", + "type": "boolean", + "default": false + }, + "driveId": { + "description": "The ID of the drive to use.", + "type": "string" + }, + "driveType": { + "description": "The type of the drive 
(personal | business | documentLibrary).", + "type": "string" + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot" + }, + "exposeOnenoteFiles": { + "description": "Set to make OneNote files show up in directory listings.", + "type": "boolean", + "default": false + }, + "hardDelete": { + "description": "Permanently delete files on removal.", + "type": "boolean", + "default": false + }, + "hashType": { + "description": "Specify the hash in use for the backend.", + "type": "string", + "default": "auto", + "example": "auto" + }, + "linkPassword": { + "description": "Set the password for links created by the link command.", + "type": "string" + }, + "linkScope": { + "description": "Set the scope of the links created by the link command.", + "type": "string", + "default": "anonymous", + "example": "anonymous" + }, + "linkType": { + "description": "Set the type of the links created by the link command.", + "type": "string", + "default": "view", + "example": "view" + }, + "listChunk": { + "description": "Size of listing chunk.", + "type": "integer", + "default": 1000 + }, + "metadataPermissions": { + "description": "Control whether permissions should be read or written in metadata.", + "type": "string", + "default": "off", + "example": "off" + }, + "noVersions": { + "description": "Remove all versions on modifying operations.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Choose national cloud region for OneDrive.", + "type": "string", + "default": "global", + "example": "global" + }, + "rootFolderId": { + "description": "ID of the root folder.", + "type": "string" + }, + "serverSideAcrossConfigs": { + "description": "Deprecated: use --server-side-across-configs instead.", + "type": "boolean", + "default": false + }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + } + } + }, + "storage.oosEnv_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Object storage compartment OCID", + "type": "string" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": 
"integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", + "type": "string" + }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" + }, + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" + }, + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosInstance_principal_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Object storage compartment OCID", + "type": "string" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", + "type": "string" + }, + 
"sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" + }, + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" + }, + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosNo_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", + "type": "string" + }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" + }, + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the 
AES-256 encryption key associated", + "type": "string", + "example": "" + }, + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosResource_principal_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Object storage compartment OCID", + "type": "string" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false + }, + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" + }, + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" + }, + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "region": { + "description": "Object storage Region", + "type": "string" + }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" + }, + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" + }, + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header 
specifies the", + "type": "string", + "example": "" + }, + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosUser_principal_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", + "type": "boolean", + "default": false + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "compartment": { + "description": "Object storage compartment OCID", + "type": "string" + }, + "configFile": { + "description": "Path to OCI config file", + "type": "string", + "default": "~/.oci/config", + "example": "~/.oci/config" + }, + "configProfile": { + "description": "Profile name inside the oci config file", + "type": "string", + "default": "Default", + "example": "Default" }, - "chunkSize": { - "description": "Upload chunk size.", + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", "type": "string", - "default": "8Mi" + "default": "4.656Gi" }, - "clientId": { - "description": "Google Application Client Id", - "type": "string" + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "clientSecret": { - "description": "OAuth Client Secret.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "copyShortcutContent": { - "description": "Server side copy contents of shortcuts instead of the shortcut.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "disableHttp2": { - "description": "Disable drive using http2.", - "type": "boolean", - "default": true - }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "InvalidUtf8" - }, - "exportFormats": { - "description": "Comma separated list of preferred formats for downloading Google docs.", - "type": "string", - "default": "docx,xlsx,pptx,svg" + "default": "Slash,InvalidUtf8,Dot" }, - "formats": { - "description": "Deprecated: See export_formats.", + "endpoint": { + "description": "Endpoint for Object storage API.", "type": "string" }, - "impersonate": { - "description": "Impersonate this user when using a service account.", - "type": "string" + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", + "type": "boolean", + "default": false }, - "importFormats": { - "description": "Comma separated list of preferred formats for uploading Google docs.", + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "namespace": { + "description": "Object storage namespace", "type": "string" }, - "keepRevisionForever": { - "description": "Keep new head revision of each file forever.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false 
}, - "listChunk": { - "description": "Size of listing chunk 100-1000, 0 to disable.", - "type": "integer", - "default": 1000 + "region": { + "description": "Object storage Region", + "type": "string" }, - "pacerBurst": { - "description": "Number of API calls to allow without sleeping.", - "type": "integer", - "default": 100 + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "pacerMinSleep": { - "description": "Minimum time to sleep between API calls.", + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", "type": "string", - "default": "100ms" + "example": "" }, - "resourceKey": { - "description": "Resource key for accessing a link-shared file.", - "type": "string" + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "rootFolderId": { - "description": "ID of the root folder.", - "type": "string" + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "type": "string", + "example": "" }, - "scope": { - "description": "Scope that rclone should use when requesting access from drive.", + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", "type": "string", - "example": "drive" + "example": "" }, - "serverSideAcrossConfigs": { - "description": "Allow server-side operations (e.g. copy) to work across different drive configs.", + "storageTier": { + "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.oosWorkload_identity_authConfig": { + "type": "object", + "properties": { + "attemptResumeUpload": { + "description": "If true attempt to resume previously started multipart upload for the object.", "type": "boolean", "default": false }, - "serviceAccountCredentials": { - "description": "Service Account Credentials JSON blob.", - "type": "string" + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" }, - "serviceAccountFile": { - "description": "Service Account Credentials JSON file path.", + "compartment": { + "description": "Object storage compartment OCID", "type": "string" }, - "sharedWithMe": { - "description": "Only show files that are shared with me.", - "type": "boolean", - "default": false + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" }, - "sizeAsQuota": { - "description": "Show sizes as storage quota usage, not actual size.", - "type": "boolean", - "default": false + "copyTimeout": { + "description": "Timeout for copy.", + "type": "string", + "default": "1m0s" }, - "skipChecksumGphotos": { - "description": "Skip MD5 checksum on Google photos and videos only.", - "type": "boolean", - "default": false + "description": { + "description": "Description of the remote.", + "type": "string" }, - "skipDanglingShortcuts": { - "description": "If set skip dangling shortcut files.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "skipGdocs": { - "description": "Skip google documents in all listings.", - "type": "boolean", - "default": false + "encoding": { + "description": "The encoding for the backend.", + "type": "string", + "default": "Slash,InvalidUtf8,Dot" }, - "skipShortcuts": { - "description": "If set skip shortcut files.", - "type": "boolean", - "default": false + "endpoint": { + "description": "Endpoint for Object storage API.", + "type": "string" }, - "starredOnly": { - "description": "Only show files that are starred.", + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.", "type": "boolean", "default": false }, - "stopOnDownloadLimit": { - "description": "Make download limit errors be fatal.", - "type": "boolean", - "default": false + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "stopOnUploadLimit": { - "description": "Make upload limit errors be fatal.", + "namespace": { + "description": "Object storage namespace", + "type": "string" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "teamDrive": { - "description": "ID of the Shared Drive (Team Drive).", + "region": { + "description": "Object storage Region", "type": "string" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "sseCustomerAlgorithm": { + "description": "If 
using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "type": "string", + "example": "" }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "sseCustomerKey": { + "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "type": "string", + "example": "" }, - "trashedOnly": { - "description": "Only show files that are in the trash.", - "type": "boolean", - "default": false + "sseCustomerKeyFile": { + "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "type": "string", + "example": "" }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "sseCustomerKeySha256": { + "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", "type": "string", - "default": "8Mi" + "example": "" }, - "useCreatedDate": { - "description": "Use file created date instead of modified date.", - "type": "boolean", - "default": false + "sseKmsKeyId": { + "description": "if using your own master key in vault, this header specifies the", + "type": "string", + "example": "" }, - "useSharedDate": { - "description": "Use date file was shared instead of modified date.", - "type": "boolean", - "default": false + "storageTier": { + "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "type": "string", + "default": "Standard", + "example": "Standard" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", + "type": "integer", + "default": 10 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + } + } + }, + "storage.opendriveConfig": { + "type": "object", + "properties": { + "chunkSize": { + "description": "Files will be uploaded in chunks this size.", + "type": "string", + "default": "10Mi" }, - "useTrash": { - "description": "Send files to the trash instead of deleting permanently.", - "type": "boolean", - "default": true + "description": { + "description": "Description of the remote.", + "type": "string" }, - "v2DownloadMinSize": { - "description": "If Object's are greater, use drive v2 API to download.", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "default": "off" + "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot" + }, + "password": { + "description": "Password.", + "type": "string" + }, + "username": { + "description": "Username.", + "type": "string" } } }, - "storage.dropboxConfig": { + "storage.pcloudConfig": { "type": "object", "properties": { "authUrl": { "description": "Auth server URL.", "type": "string" }, - "batchCommitTimeout": { - "description": "Max time to wait for a batch to finish committing", - "type": "string", - "default": "10m0s" - }, - "batchMode": { - "description": "Upload file batching sync|async|off.", - "type": "string", - "default": "sync" - }, - "batchSize": { - "description": "Max number of files in upload batch.", - "type": "integer", - "default": 0 - }, - "batchTimeout": { - "description": "Max time to allow an idle upload batch before uploading.", - "type": "string", - "default": "0s" - }, - "chunkSize": { - "description": "Upload chunk size (\u003c 150Mi).", - "type": 
"string", - "default": "48Mi" - }, "clientId": { "description": "OAuth Client Id.", "type": "string" @@ -9536,24 +12006,29 @@ "description": "OAuth Client Secret.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot" + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "impersonate": { - "description": "Impersonate this user when using a business account.", - "type": "string" + "hostname": { + "description": "Hostname to connect to.", + "type": "string", + "default": "api.pcloud.com", + "example": "api.pcloud.com" }, - "sharedFiles": { - "description": "Instructs rclone to work on individual shared files.", - "type": "boolean", - "default": false + "password": { + "description": "Your pcloud password.", + "type": "string" }, - "sharedFolders": { - "description": "Instructs rclone to work on shared folders.", - "type": "boolean", - "default": false + "rootFolderId": { + "description": "Fill in for rclone to use a non root folder as its starting point.", + "type": "string", + "default": "d0" }, "token": { "description": "OAuth Access Token as a JSON blob.", @@ -9562,1535 +12037,1829 @@ "tokenUrl": { "description": "Token server url.", "type": "string" + }, + "username": { + "description": "Your pcloud username.", + "type": "string" } } }, - "storage.fichierConfig": { + "storage.premiumizemeConfig": { "type": "object", "properties": { "apiKey": { - "description": "Your API Key, get it from https://1fichier.com/console/params.pl.", + "description": "API Key.", + "type": "string" + }, + "authUrl": { + "description": "Auth server URL.", + "type": "string" + }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot" - }, - "filePassword": { - "description": "If you want to download a shared file that is password protected, add this parameter.", - "type": "string" + "default": "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "folderPassword": { - "description": "If you want to list the files in a shared folder that is password protected, add this parameter.", + "token": { + "description": "OAuth Access Token as a JSON blob.", "type": "string" }, - "sharedFolder": { - "description": "If you want to download a shared folder, add this parameter.", + "tokenUrl": { + "description": "Token server url.", "type": "string" } } }, - "storage.filefabricConfig": { + "storage.putioConfig": { "type": "object", "properties": { - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,Del,Ctl,InvalidUtf8,Dot" - }, - "permanentToken": { - "description": "Permanent Authentication Token.", + "authUrl": { + "description": "Auth server URL.", "type": "string" }, - "rootFolderId": { - "description": "ID of the root folder.", + "clientId": { + "description": "OAuth Client Id.", "type": "string" }, - "token": { - "description": "Session Token.", + "clientSecret": { + "description": "OAuth Client Secret.", "type": "string" }, - "tokenExpiry": { - "description": "Token expiry 
time.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "url": { - "description": "URL of the Enterprise File Fabric to connect to.", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "example": "https://storagemadeeasy.com" + "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" }, - "version": { - "description": "Version read from the file fabric.", + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", "type": "string" } } }, - "storage.ftpConfig": { + "storage.qingstorConfig": { "type": "object", "properties": { - "askPassword": { - "description": "Allow asking for FTP password when needed.", - "type": "boolean", - "default": false + "accessKeyId": { + "description": "QingStor Access Key ID.", + "type": "string" }, - "closeTimeout": { - "description": "Maximum time to wait for a response to close.", + "chunkSize": { + "description": "Chunk size to use for uploading.", "type": "string", - "default": "1m0s" + "default": "4Mi" }, - "concurrency": { - "description": "Maximum number of FTP simultaneous connections, 0 for unlimited.", + "connectionRetries": { + "description": "Number of connection retries.", "type": "integer", - "default": 0 - }, - "disableEpsv": { - "description": "Disable using EPSV even if server advertises support.", - "type": "boolean", - "default": false - }, - "disableMlsd": { - "description": "Disable using MLSD even if server advertises support.", - "type": "boolean", - "default": false - }, - "disableTls13": { - "description": "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", - "type": "boolean", - "default": false + "default": 3 }, - "disableUtf8": { - "description": "Disable using UTF-8 even if server advertises support.", - "type": "boolean", - "default": false + "description": { + "description": "Description of the remote.", + "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,Del,Ctl,RightSpace,Dot", - "example": "Asterisk,Ctl,Dot,Slash" - }, - "explicitTls": { - "description": "Use Explicit FTPS (FTP over TLS).", - "type": "boolean", - "default": false - }, - "forceListHidden": { - "description": "Use LIST -a to force listing of hidden files and folders. 
This will disable the use of MLSD.", - "type": "boolean", - "default": false + "default": "Slash,Ctl,InvalidUtf8" }, - "host": { - "description": "FTP host to connect to.", + "endpoint": { + "description": "Enter an endpoint URL to connection QingStor API.", "type": "string" }, - "idleTimeout": { - "description": "Max time before closing idle connections.", - "type": "string", - "default": "1m0s" - }, - "noCheckCertificate": { - "description": "Do not verify the TLS certificate of the server.", + "envAuth": { + "description": "Get QingStor credentials from runtime.", "type": "boolean", - "default": false + "default": false, + "example": false }, - "pass": { - "description": "FTP password.", + "secretAccessKey": { + "description": "QingStor Secret Access Key (password).", "type": "string" }, - "port": { - "description": "FTP port number.", + "uploadConcurrency": { + "description": "Concurrency for multipart uploads.", "type": "integer", - "default": 21 + "default": 1 }, - "shutTimeout": { - "description": "Maximum time to wait for data connection closing status.", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "default": "1m0s" - }, - "tls": { - "description": "Use Implicit FTPS (FTP over TLS).", - "type": "boolean", - "default": false - }, - "tlsCacheSize": { - "description": "Size of TLS session cache for all control and data connections.", - "type": "integer", - "default": 32 + "default": "200Mi" }, - "user": { - "description": "FTP username.", + "zone": { + "description": "Zone to connect to.", "type": "string", - "default": "$USER" - }, - "writingMdtm": { - "description": "Use MDTM to set modification time (VsFtpd quirk)", - "type": "boolean", - "default": false + "example": "pek3a" } } }, - "storage.gcsConfig": { + "storage.s3AWSConfig": { "type": "object", "properties": { - "anonymous": { - "description": "Access public buckets and objects without credentials.", - "type": "boolean", - "default": false + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "authUrl": { - "description": "Auth server URL.", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, "bucketAcl": { - "description": "Access Control List for new buckets.", + "description": "Canned ACL used when creating buckets.", "type": "string", - "example": "authenticatedRead" + "example": "private" }, - "bucketPolicyOnly": { - "description": "Access checks should use bucket-level IAM policies.", + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, - "clientId": { - "description": "OAuth Client Id.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, - "decompress": { - "description": "If set this will decompress gzip encoded objects.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, + "disableHttp2": { + "description": 
"Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,CrLf,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for the service.", + "description": "Endpoint for S3 API.", "type": "string" }, "envAuth": { - "description": "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).", + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", "default": false, "example": false }, - "location": { - "description": "Location for the newly created buckets.", - "type": "string", - "example": "" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "leavePartsOnError": { + "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", "type": "boolean", "default": false }, - "objectAcl": { - "description": "Access Control List for new objects.", - "type": "string", - "example": "authenticatedRead" - }, - "projectNumber": { - "description": "Project number.", - "type": "string" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "serviceAccountCredentials": { - "description": "Service Account Credentials JSON blob.", - "type": "string" + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" }, - "serviceAccountFile": { - "description": "Service Account Credentials JSON file path.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "storageClass": { - "description": "The storage class to use when storing objects in Google Cloud Storage.", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string", "example": "" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" - }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" - } - } - }, - "storage.gphotosConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "Slash,CrLf,InvalidUtf8,Dot" + "default": "unset" }, - "includeArchived": { - "description": "Also view and download archived media.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "readOnly": { - "description": "Set to make the Google Photos backend read only.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "readSize": { - "description": "Set to read the size of media items.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, - "startYear": { - "description": "Year limits the photos to be downloaded to those which are uploaded after the given year.", - "type": "integer", - "default": 2000 + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" - } - } - }, - "storage.hdfsConfig": { - "type": "object", - "properties": { - "dataTransferProtection": { - "description": "Kerberos data transfer protection: authentication|integrity|privacy.", + "region": { + "description": "Region to connect to.", "type": "string", - "example": "privacy" + "example": "us-east-1" }, - "encoding": { - "description": "The encoding for the backend.", + "requesterPays": { + "description": "Enables requester pays option when interacting with S3 bucket.", + "type": "boolean", + "default": false + }, + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "Slash,Colon,Del,Ctl,InvalidUtf8,Dot" + "default": "Off" }, - "namenode": { - "description": "Hadoop name node and port.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "servicePrincipalName": { - "description": "Kerberos service principal name for the namenode.", + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "username": { - "description": "Hadoop user name.", - "type": "string", - "example": "root" - } - } - }, - "storage.hidriveConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "chunkSize": { - "description": "Chunksize for chunked uploads.", + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", - "default": "48Mi" + "example": "" }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" }, - "clientSecret": { - "description": "OAuth 
Client Secret.", + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" + }, + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "If using KMS ID you must provide the ARN of Key.", + "type": "string", + "example": "" + }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string", + "example": "" + }, + "stsEndpoint": { + "description": "Endpoint for STS (deprecated).", "type": "string" }, - "disableFetchingMemberCount": { - "description": "Do not fetch number of objects in directories unless it is absolutely necessary.", + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" + }, + "useAccelerateEndpoint": { + "description": "If true use the AWS S3 accelerated endpoint.", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", "type": "string", - "default": "Slash,Dot" + "default": "unset" }, - "endpoint": { - "description": "Endpoint for the service.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "https://api.hidrive.strato.com/2.1" + "default": "unset" }, - "rootPrefix": { - "description": "The root/parent folder for all paths.", - "type": "string", - "default": "/", - "example": "/" + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "scopeAccess": { - "description": "Access permissions that rclone should use when requesting access from HiDrive.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "default": "rw", - "example": "rw" + "default": "unset" }, - "scopeRole": { - "description": "User-level that rclone should use when requesting access from HiDrive.", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "default": "user", - "example": "user" + "default": "unset" }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for chunked uploads.", - "type": "integer", - "default": 4 + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff/Threshold for chunked uploads.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "96Mi" - } - } - }, - "storage.httpConfig": { - 
"type": "object", - "properties": { - "headers": { - "description": "Set HTTP headers for all transactions.", - "type": "string" + "default": "off" }, - "noHead": { - "description": "Don't use HEAD requests.", + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "noSlash": { - "description": "Set this if the site doesn't end directories with /.", + "versions": { + "description": "Include old versions in directory listings.", "type": "boolean", "default": false - }, - "url": { - "description": "URL of HTTP host to connect to.", - "type": "string" } } }, - "storage.internetarchiveConfig": { + "storage.s3AlibabaConfig": { "type": "object", "properties": { "accessKeyId": { - "description": "IAS3 Access Key.", + "description": "AWS Access Key ID.", "type": "string" }, - "disableChecksum": { - "description": "Don't ask the server to test against MD5 checksum calculated by rclone.", - "type": "boolean", - "default": true + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "default": "Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot" + "example": "private" }, - "endpoint": { - "description": "IAS3 Endpoint.", + "chunkSize": { + "description": "Chunk size to use for uploading.", "type": "string", - "default": "https://s3.us.archive.org" + "default": "5Mi" }, - "frontEndpoint": { - "description": "Host of InternetArchive Frontend.", + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", "type": "string", - "default": "https://archive.org" + "default": "4.656Gi" }, - "secretAccessKey": { - "description": "IAS3 Secret Key (password).", - "type": "string" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false }, - "waitArchive": { - "description": "Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish.", - "type": "string", - "default": "0s" - } - } - }, - "storage.jottacloudConfig": { - "type": "object", - "properties": { - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot" + "description": { + "description": "Description of the remote.", + "type": "string" }, - "hardDelete": { - "description": "Delete files permanently rather than putting them into the trash.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "md5MemoryLimit": { - "description": "Files bigger than this will be cached on disk to calculate the MD5 if required.", - "type": "string", - "default": "10Mi" - }, - "noVersions": { - "description": "Avoid server side versioning by deleting files and recreating files instead of overwriting them.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "trashedOnly": { - "description": "Only show files that are in the trash.", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", "type": "boolean", "default": false }, - "uploadResumeLimit": { - "description": "Files bigger than this can be resumed if the upload fail's.", - "type": 
"string", - "default": "10Mi" - } - } - }, - "storage.koofrDigistorageConfig": { - "type": "object", - "properties": { + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "mountid": { - "description": "Mount ID of the mount to use.", - "type": "string" + "endpoint": { + "description": "Endpoint for OSS API.", + "type": "string", + "example": "oss-accelerate.aliyuncs.com" }, - "password": { - "description": "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).", - "type": "string" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "setmtime": { - "description": "Does the backend support setting modification time.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", "type": "boolean", "default": true }, - "user": { - "description": "Your user name.", - "type": "string" - } - } - }, - "storage.koofrKoofrConfig": { - "type": "object", - "properties": { - "encoding": { - "description": "The encoding for the backend.", + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "unset" }, - "mountid": { - "description": "Mount ID of the mount to use.", - "type": "string" + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 }, - "password": { - "description": "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).", - "type": "string" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "setmtime": { - "description": "Does the backend support setting modification time.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", - "default": true + "default": false }, - "user": { - "description": "Your user name.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" + }, + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false + }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" - } - } - }, - "storage.koofrOtherConfig": { - "type": "object", - "properties": { - "encoding": { - "description": "The encoding for the backend.", + }, + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Off" }, - "endpoint": { - "description": "The Koofr API endpoint to use.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "mountid": { - "description": "Mount ID of the mount to use.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "password": { - "description": "Your password for rclone (generate one at your service's settings page).", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "setmtime": { - "description": "Does the backend support setting modification time.", - "type": "boolean", - "default": true + "storageClass": { + "description": "The storage class to use when storing new objects in OSS.", + "type": "string", + "example": "" }, - "user": { - "description": "Your user name.", - "type": "string" - } - } - }, - "storage.localConfig": { - "type": "object", - "properties": { - "caseInsensitive": { - "description": "Force the filesystem to report itself as case insensitive.", - "type": "boolean", - "default": false + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "caseSensitive": { - "description": "Force the filesystem to report itself as case sensitive.", - "type": "boolean", - "default": false + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "copyLinks": { - "description": "Follow symlinks and copy the pointed to item.", - "type": "boolean", - "default": false + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" }, - "encoding": { - "description": "The encoding for the backend.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "Slash,Dot" + "default": "unset" }, - "links": { - "description": "Translate symlinks to/from regular files with a '.rclonelink' extension.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "noCheckUpdated": { - "description": "Don't check to see if the files change during upload.", + 
"useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, - "noPreallocate": { - "description": "Disable preallocation of disk space for transferred files.", + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "noSetModtime": { - "description": "Disable setting modtime.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "noSparse": { - "description": "Disable sparse files for multi-thread downloads.", + "versions": { + "description": "Include old versions in directory listings.", "type": "boolean", "default": false + } + } + }, + "storage.s3ArvanCloudConfig": { + "type": "object", + "properties": { + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "nounc": { - "description": "Disable UNC (long path names) conversion on Windows.", - "type": "boolean", - "default": false, - "example": true + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" }, - "oneFileSystem": { - "description": "Don't cross filesystem boundaries (unix/macOS only).", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" + }, + "chunkSize": { + "description": "Chunk size to use for uploading.", + "type": "string", + "default": "5Mi" + }, + "copyCutoff": { + "description": "Cutoff for switching to multipart copy.", + "type": "string", + "default": "4.656Gi" + }, + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, - "skipLinks": { - "description": "Don't warn about skipped symlinks.", + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "unicodeNormalization": { - "description": "Apply unicode NFC normalization to paths and filenames.", + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, - "zeroSizeLinks": { - "description": "Assume the Stat size of links is zero (and read them instead) (deprecated).", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", "type": "boolean", "default": false - } - } - }, - "storage.mailruConfig": { - "type": "object", - "properties": { - "checkHash": { - "description": "What should copy do if file checksum is mismatched or invalid.", - "type": "boolean", - "default": true, - "example": true + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" }, "encoding": { "description": "The encoding for the backend.", "type": "string", - "default": 
"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "default": "Slash,InvalidUtf8,Dot" }, - "pass": { - "description": "Password.", - "type": "string" + "endpoint": { + "description": "Endpoint for Arvan Cloud Object Storage (AOS) API.", + "type": "string", + "example": "s3.ir-thr-at1.arvanstorage.ir" }, - "quirks": { - "description": "Comma separated list of internal maintenance flags.", - "type": "string" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "speedupEnable": { - "description": "Skip full upload if there is another file with same data hash.", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", "type": "boolean", - "default": true, - "example": true + "default": true }, - "speedupFilePatterns": { - "description": "Comma separated list of file name patterns eligible for speedup (put by hash).", - "type": "string", - "default": "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf", - "example": "" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "speedupMaxDisk": { - "description": "This option allows you to disable speedup (put by hash) for large files.", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "3Gi", - "example": "0" + "default": "unset" }, - "speedupMaxMemory": { - "description": "Files larger than the size given below will always be hashed on disk.", + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "locationConstraint": { + "description": "Location constraint - must match endpoint.", "type": "string", - "default": "32Mi", - "example": "0" + "example": "ir-thr-at1" }, - "user": { - "description": "User name (usually email).", - "type": "string" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "userAgent": { - "description": "HTTP user agent used internally by client.", - "type": "string" - } - } - }, - "storage.megaConfig": { - "type": "object", - "properties": { - "debug": { - "description": "Output more debug from Mega.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" + "default": "unset" }, - "hardDelete": { - "description": "Delete files permanently rather than putting them into the trash.", + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "pass": { - "description": "Password.", - "type": "string" + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "useHttps": { - "description": "Use HTTPS for transfers.", + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", "type": "boolean", "default": false }, - "user": { - "description": "User name.", - "type": "string" - } - } - }, - "storage.netstorageConfig": { - "type": "object", - "properties": { - "account": { - "description": "Set the NetStorage account name", - "type": "string" + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false }, - "host": { - "description": "Domain+path of NetStorage host to connect to.", + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "protocol": { - "description": "Select between HTTP or HTTPS protocol.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "https", - "example": "http" + "default": "Off" }, - "secret": { - "description": "Set the NetStorage account secret/G2O key for authentication.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" - } - } - }, - "storage.onedriveConfig": { - "type": "object", - "properties": { - "accessScopes": { - "description": "Set scopes to be requested by rclone.", - "type": "string", - "default": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access", - "example": "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" }, - "authUrl": { - "description": "Auth server URL.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "chunkSize": { - "description": "Chunk size to upload files with - must be multiple of 320k (327,680 bytes).", - "type": "string", - "default": "10Mi" - }, - "clientId": { - "description": "OAuth Client Id.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "clientSecret": { - "description": "OAuth Client Secret.", - "type": "string" + "storageClass": { + "description": "The storage class to use when storing new objects in ArvanCloud.", + "type": "string", + "example": "STANDARD" }, - "disableSitePermission": { - "description": "Disable the request for Sites.Read.All permission.", - "type": "boolean", - "default": false + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "driveId": { - "description": "The ID of the drive to use.", - "type": "string" + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", + "type": "string", + "default": "200Mi" }, - "driveType": { - "description": "The type of the drive (personal | business | documentLibrary).", - "type": "string" + 
"useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" }, - "encoding": { - "description": "The encoding for the backend.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot" + "default": "unset" }, - "exposeOnenoteFiles": { - "description": "Set to make OneNote files show up in directory listings.", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, - "hashType": { - "description": "Specify the hash in use for the backend.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "default": "auto", - "example": "auto" - }, - "linkPassword": { - "description": "Set the password for links created by the link command.", - "type": "string" + "default": "unset" }, - "linkScope": { - "description": "Set the scope of the links created by the link command.", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "default": "anonymous", - "example": "anonymous" + "default": "unset" }, - "linkType": { - "description": "Set the type of the links created by the link command.", - "type": "string", - "default": "view", - "example": "view" + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false }, - "listChunk": { - "description": "Size of listing chunk.", - "type": "integer", - "default": 1000 + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "noVersions": { - "description": "Remove all versions on modifying operations.", + "v2Auth": { + "description": "If true use v2 authentication.", "type": "boolean", "default": false }, - "region": { - "description": "Choose national cloud region for OneDrive.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "global", - "example": "global" - }, - "rootFolderId": { - "description": "ID of the root folder.", - "type": "string" + "default": "off" }, - "serverSideAcrossConfigs": { - "description": "Allow server-side operations (e.g. 
copy) to work across different onedrive configs.", + "versionDeleted": { + "description": "Show deleted file markers when using versions.", "type": "boolean", "default": false }, - "token": { - "description": "OAuth Access Token as a JSON blob.", - "type": "string" - }, - "tokenUrl": { - "description": "Token server url.", - "type": "string" + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosEnv_authConfig": { + "storage.s3CephConfig": { "type": "object", "properties": { + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" + }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" + }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Object storage API.", + "description": "Endpoint for S3 API.", "type": "string" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", - "default": false + "default": false, + "example": false }, - "namespace": { - "description": "Object storage namespace", + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" + }, + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", "type": "string" }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + 
"memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" + }, "noCheckBucket": { "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" + }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" + }, + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, + "sessionToken": { + "description": "An AWS session token.", + "type": "string" + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", "example": "" }, "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", "type": "string", "example": "" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", "type": "string", "example": "" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", "type": "string", "example": "" }, "sseKmsKeyId": { - "description": "if using using your own master key in vault, this header specifies the", + "description": "If using KMS ID you must provide the ARN of Key.", "type": "string", "example": "" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", - "type": "string", - "default": "Standard", - "example": "Standard" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 10 + "default": 4 }, "uploadCutoff": { "description": "Cutoff for switching to chunked upload.", "type": "string", "default": "200Mi" + }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false + }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosInstance_principal_authConfig": { + "storage.s3ChinaMobileConfig": { "type": "object", "properties": { + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" + }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" + }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" - }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", + 
"directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "example": "" + "default": "Slash,InvalidUtf8,Dot" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "endpoint": { + "description": "Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API.", "type": "string", - "example": "" + "example": "eos-wuxi-1.cmecloud.cn" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", - "type": "string", - "example": "" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", - "type": "string", - "example": "" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "sseKmsKeyId": { - "description": "if using using your own master key in vault, this header specifies the", - "type": "string", - "example": "" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", "type": "integer", - "default": 10 + "default": 0 }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", - "type": "string", - "default": "200Mi" - } - } - }, - "storage.oosNo_authConfig": { - "type": "object", - "properties": { - "chunkSize": { - "description": "Chunk size to use for uploading.", + "locationConstraint": { + "description": "Location constraint - must match endpoint.", "type": "string", - "default": "5Mi" + "example": "wuxi1" }, - "copyCutoff": { - "description": "Cutoff for switching to multipart copy.", - "type": "string", - "default": "4.656Gi" + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 }, - "copyTimeout": { - "description": "Timeout for copy.", + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "Slash,InvalidUtf8,Dot" + "default": "unset" }, - "endpoint": { - "description": "Endpoint for Object storage API.", - "type": "string" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", "type": "boolean", "default": false }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" + }, + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, + "sessionToken": { + "description": "An AWS session token.", + "type": "string" + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" 
}, "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", "type": "string", "example": "" }, "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", "type": "string", "example": "" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", "type": "string", "example": "" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", "type": "string", "example": "" }, - "sseKmsKeyId": { - "description": "if using using your own master key in vault, this header specifies the", + "storageClass": { + "description": "The storage class to use when storing new objects in ChinaMobile.", "type": "string", "example": "" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", - "type": "string", - "default": "Standard", - "example": "Standard" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", - "default": 10 + "default": 4 }, "uploadCutoff": { "description": "Cutoff for switching to chunked upload.", "type": "string", "default": "200Mi" + }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", + "type": "string", + "default": "unset" + }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false + }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false + }, + "versionAt": { + "description": "Show file versions as they were at the specified time.", + "type": "string", + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + 
"type": "boolean", + "default": false } } }, - "storage.oosResource_principal_authConfig": { + "storage.s3CloudflareConfig": { "type": "object", "properties": { + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" + }, + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", + "type": "string", + "example": "private" + }, "chunkSize": { "description": "Chunk size to use for uploading.", "type": "string", "default": "5Mi" }, - "compartment": { - "description": "Object storage compartment OCID", - "type": "string" - }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" + "decompress": { + "description": "If set this will decompress gzip encoded objects.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", "default": false }, + "disableHttp2": { + "description": "Disable usage of http2 for S3 backends.", + "type": "boolean", + "default": false + }, + "downloadUrl": { + "description": "Custom endpoint for downloads.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Object storage API.", + "description": "Endpoint for S3 API.", "type": "string" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false + }, + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true + }, + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 + }, + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", + "type": "string", + "default": "unset" + }, + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", + "type": "integer", + "default": 0 + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", + "type": "string", + "default": "1m0s" + }, + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "mightGzip": { + "description": "Set this if the backend might gzip objects.", + "type": "string", + "default": "unset" }, "noCheckBucket": { "description": "If set, don't attempt to check the bucket exists or create it.", "type": "boolean", "default": false }, + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false + }, + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false + }, + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", + "type": "string" + }, "region": { - "description": "Object storage Region", + "description": "Region to connect to.", + "type": "string", + "example": "auto" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "sessionToken": { + "description": "An AWS session token.", + "type": "string" + }, + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" + }, + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 + }, + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "example": "" + "default": "200Mi" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "example": "" + "default": "unset" }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "example": "" + "default": "unset" }, - "sseKmsKeyId": { - "description": "if using using your own master key in vault, this header specifies the", + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", "type": "string", - "example": "" + "default": "unset" }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. 
https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", + "type": "boolean", + "default": false + }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 10 + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "200Mi" + "default": "off" + }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.oosUser_principal_authConfig": { + "storage.s3DigitalOceanConfig": { "type": "object", "properties": { - "chunkSize": { - "description": "Chunk size to use for uploading.", - "type": "string", - "default": "5Mi" + "accessKeyId": { + "description": "AWS Access Key ID.", + "type": "string" }, - "compartment": { - "description": "Object storage compartment OCID", + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", "type": "string" }, - "configFile": { - "description": "Path to OCI config file", + "bucketAcl": { + "description": "Canned ACL used when creating buckets.", "type": "string", - "default": "~/.oci/config", - "example": "~/.oci/config" + "example": "private" }, - "configProfile": { - "description": "Profile name inside the oci config file", + "chunkSize": { + "description": "Chunk size to use for uploading.", "type": "string", - "default": "Default", - "example": "Default" + "default": "5Mi" }, "copyCutoff": { "description": "Cutoff for switching to multipart copy.", "type": "string", "default": "4.656Gi" }, - "copyTimeout": { - "description": "Timeout for copy.", - "type": "string", - "default": "1m0s" - }, - "disableChecksum": { - "description": "Don't store MD5 checksum with object metadata.", + "decompress": { + "description": "If set this will decompress gzip encoded objects.", "type": "boolean", "default": false }, - "encoding": { - "description": "The encoding for the backend.", - "type": "string", - "default": "Slash,InvalidUtf8,Dot" - }, - "endpoint": { - "description": "Endpoint for Object storage API.", + "description": { + "description": "Description of the remote.", "type": "string" }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", "type": "boolean", "default": false }, - "namespace": { - "description": "Object storage namespace", - "type": "string" + "disableChecksum": { + "description": "Don't store MD5 checksum with object metadata.", + "type": "boolean", + "default": false }, - "noCheckBucket": { - "description": "If set, don't attempt to check the bucket exists or create it.", + "disableHttp2": { + "description": "Disable usage of http2 
for S3 backends.", "type": "boolean", "default": false }, - "region": { - "description": "Object storage Region", + "downloadUrl": { + "description": "Custom endpoint for downloads.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the optional header that specifies \"AES256\" as the encryption algorithm.", + "encoding": { + "description": "The encoding for the backend.", "type": "string", - "example": "" + "default": "Slash,InvalidUtf8,Dot" }, - "sseCustomerKey": { - "description": "To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to", + "endpoint": { + "description": "Endpoint for S3 API.", "type": "string", - "example": "" + "example": "syd1.digitaloceanspaces.com" }, - "sseCustomerKeyFile": { - "description": "To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated", - "type": "string", - "example": "" + "envAuth": { + "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", + "type": "boolean", + "default": false, + "example": false }, - "sseCustomerKeySha256": { - "description": "If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption", - "type": "string", - "example": "" + "forcePathStyle": { + "description": "If true use path style access if false use virtual hosted style.", + "type": "boolean", + "default": true }, - "sseKmsKeyId": { - "description": "if using using your own master key in vault, this header specifies the", - "type": "string", - "example": "" + "listChunk": { + "description": "Size of listing chunk (response list for each ListObject S3 request).", + "type": "integer", + "default": 1000 }, - "storageTier": { - "description": "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm", + "listUrlEncode": { + "description": "Whether to url encode listings: true/false/unset", "type": "string", - "default": "Standard", - "example": "Standard" + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "listVersion": { + "description": "Version of ListObjects to use: 1,2 or 0 for auto.", "type": "integer", - "default": 10 + "default": 0 }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", - "type": "string", - "default": "200Mi" - } - } - }, - "storage.opendriveConfig": { - "type": "object", - "properties": { - "chunkSize": { - "description": "Files will be uploaded in chunks this size.", + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, + "maxUploadParts": { + "description": "Maximum number of parts in a multipart upload.", + "type": "integer", + "default": 10000 + }, + "memoryPoolFlushTime": { + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", - "default": "10Mi" + "default": "1m0s" }, - "encoding": { - "description": "The encoding for the backend.", + "memoryPoolUseMmap": { + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", + "type": "boolean", + "default": false + }, + "mightGzip": { + "description": "Set this if the backend might gzip objects.", "type": "string", - "default": "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot" + "default": "unset" }, - "password": { - "description": "Password.", - "type": "string" + "noCheckBucket": { + "description": "If set, don't attempt to check the bucket exists or create it.", + "type": "boolean", + "default": false }, - "username": { - "description": "Username.", - "type": "string" - } - } - }, - "storage.pcloudConfig": { - "type": "object", - "properties": { - "authUrl": { - "description": "Auth server URL.", - "type": "string" + "noHead": { + "description": "If set, don't HEAD uploaded objects to check integrity.", + "type": "boolean", + "default": false }, - "clientId": { - "description": "OAuth Client Id.", - "type": "string" + "noHeadObject": { + "description": "If set, do not do HEAD before GET when getting objects.", + "type": "boolean", + "default": false }, - "clientSecret": { - "description": "OAuth Client Secret.", + "noSystemMetadata": { + "description": "Suppress setting and reading of system metadata", + "type": "boolean", + "default": false + }, + "profile": { + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "encoding": { - "description": "The encoding for the backend.", + "region": { + "description": "Region to connect to.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" + "example": "" }, - "hostname": { - "description": "Hostname to connect to.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "default": "api.pcloud.com", - "example": "api.pcloud.com" + "default": "Off" }, - "password": { - "description": "Your pcloud password.", + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", "type": "string" }, - "rootFolderId": { - "description": "Fill in for rclone to use a non root folder as its starting point.", - "type": "string", - "default": "d0" - }, - "token": { - "description": "OAuth Access Token as a JSON blob.", + "sessionToken": { + "description": "An AWS session token.", "type": "string" }, - "tokenUrl": { - "description": "Token server url.", + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", "type": "string" }, - "username": { - "description": "Your pcloud username.", - "type": "string" - } - } - }, - "storage.premiumizemeConfig": { - "type": "object", - "properties": { - "apiKey": { - "description": "API Key.", - "type": "string" + "uploadConcurrency": { + "description": "Concurrency for multipart uploads and copies.", + "type": "integer", + "default": 4 }, - "encoding": { - "description": "The encoding for the backend.", + "uploadCutoff": { + "description": "Cutoff for switching to chunked upload.", "type": "string", - "default": "Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot" - } - } - }, - "storage.putioConfig": { - "type": "object", - "properties": { - "encoding": { - "description": "The encoding for the backend.", + "default": "200Mi" + }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", "type": "string", - "default": "Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot" - } - } - }, - "storage.qingstorConfig": { - "type": "object", - "properties": { - "accessKeyId": { - "description": "QingStor Access Key ID.", - "type": "string" + "default": 
"unset" }, - "chunkSize": { - "description": "Chunk size to use for uploading.", + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", "type": "string", - "default": "4Mi" + "default": "unset" }, - "connectionRetries": { - "description": "Number of connection retries.", - "type": "integer", - "default": 3 + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false }, - "encoding": { - "description": "The encoding for the backend.", + "useMultipartEtag": { + "description": "Whether to use ETag in multipart uploads for verification", "type": "string", - "default": "Slash,Ctl,InvalidUtf8" + "default": "unset" }, - "endpoint": { - "description": "Enter an endpoint URL to connection QingStor API.", - "type": "string" + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" }, - "envAuth": { - "description": "Get QingStor credentials from runtime.", + "usePresignedRequest": { + "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", - "default": false, - "example": false + "default": false }, - "secretAccessKey": { - "description": "QingStor Secret Access Key (password).", - "type": "string" + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" }, - "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", - "type": "integer", - "default": 1 + "v2Auth": { + "description": "If true use v2 authentication.", + "type": "boolean", + "default": false }, - "uploadCutoff": { - "description": "Cutoff for switching to chunked upload.", + "versionAt": { + "description": "Show file versions as they were at the specified time.", "type": "string", - "default": "200Mi" + "default": "off" }, - "zone": { - "description": "Zone to connect to.", - "type": "string", - "example": "pek3a" + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, + "versions": { + "description": "Include old versions in directory listings.", + "type": "boolean", + "default": false } } }, - "storage.s3AWSConfig": { + "storage.s3DreamhostConfig": { "type": "object", "properties": { "accessKeyId": { @@ -11121,6 +13890,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -11142,7 +13920,8 @@ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string" + "type": "string", + "example": "objects-us-east-1.dream.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -11155,11 +13934,6 @@ "type": "boolean", "default": true }, - "leavePartsOnError": { - "description": "If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.", - "type": "boolean", - "default": false - }, "listChunk": { "description": "Size of listing chunk (response list for each ListObject S3 request).", "type": "integer", @@ -11177,8 
+13951,7 @@ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string", - "example": "" + "type": "string" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -11186,12 +13959,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -11227,22 +14000,17 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "us-east-1" + "example": "" }, - "requesterPays": { - "description": "Enables requester pays option when interacting with S3 bucket.", - "type": "boolean", - "default": false + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -11251,42 +14019,8 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", - "type": "string", - "example": "" - }, - "storageClass": { - "description": "The storage class to use when storing new objects in S3.", - "type": "string", - "example": "" - }, - "stsEndpoint": { - "description": "Endpoint for STS.", - "type": "string" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -11295,8 +14029,18 @@ "type": "string", "default": "200Mi" }, - "useAccelerateEndpoint": { - "description": "If true use the AWS S3 accelerated endpoint.", + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", "type": "boolean", "default": false }, @@ -11305,11 +14049,21 @@ "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone 
should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -11320,6 +14074,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -11327,7 +14086,7 @@ } } }, - "storage.s3AlibabaConfig": { + "storage.s3GCSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -11358,6 +14117,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -11378,9 +14146,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for OSS API.", + "description": "Endpoint for Google Cloud Storage.", "type": "string", - "example": "oss-accelerate.aliyuncs.com" + "example": "https://storage.googleapis.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -11408,18 +14176,22 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -11452,6 +14224,16 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -11464,13 +14246,8 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in OSS.", - "type": "string", - "example": "" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -11479,16 +14256,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -11499,6 +14301,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -11506,7 +14313,7 @@ } } }, - "storage.s3ArvanCloudConfig": { + "storage.s3HuaweiOBSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -11537,6 +14344,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -11557,9 +14373,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Arvan Cloud Object Storage (AOS) API.", + "description": "Endpoint for OBS API.", "type": "string", - "example": "s3.ir-thr-at1.arvanstorage.com" + "example": "obs.af-south-1.myhuaweicloud.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -11587,23 +14403,18 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must match endpoint.", - "type": "string", - "example": "ir-thr-at1" - }, "maxUploadParts": { 
"description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -11636,6 +14447,16 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint.", + "type": "string", + "example": "af-south-1" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -11648,13 +14469,8 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in ArvanCloud.", - "type": "string", - "example": "STANDARD" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -11663,16 +14479,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -11683,6 +14524,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -11690,7 +14536,7 @@ } } }, - "storage.s3CephConfig": { + "storage.s3IBMCOSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -11699,7 +14545,8 @@ }, "acl": { "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" + "type": "string", + "example": "private" }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", @@ -11721,6 +14568,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + 
"description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -11741,8 +14597,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", - "type": "string" + "description": "Endpoint for IBM COS S3 API.", + "type": "string", + "example": "s3.us.cloud-object-storage.appdomain.cloud" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -11771,8 +14628,9 @@ "default": 0 }, "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" + "description": "Location constraint - must match endpoint when using IBM Cloud Public.", + "type": "string", + "example": "us-standard" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -11780,12 +14638,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -11815,58 +14673,33 @@ "default": false }, "profile": { - "description": "Profile to use in the shared credentials file.", - "type": "string" - }, - "region": { - "description": "Region to connect to.", - "type": "string", - "example": "" - }, - "secretAccessKey": { - "description": "AWS Secret Access Key (password).", - "type": "string" - }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sessionToken": { - "description": "An AWS session token.", - "type": "string" - }, - "sharedCredentialsFile": { - "description": "Path to the shared credentials file.", + "description": "Profile to use in the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", + "region": { + "description": "Region to connect to.", "type": "string", "example": "" }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" + "secretAccessKey": { + "description": "AWS Secret Access Key (password).", + "type": "string" }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" + "sessionToken": { + "description": "An AWS session token.", + "type": "string" }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", - "type": "string", - "example": "" + "sharedCredentialsFile": { + "description": "Path to the shared credentials file.", + "type": "string" }, 
"uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -11875,16 +14708,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -11895,6 +14753,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -11902,7 +14765,7 @@ } } }, - "storage.s3ChinaMobileConfig": { + "storage.s3IDriveConfig": { "type": "object", "properties": { "accessKeyId": { @@ -11933,6 +14796,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -11952,11 +14824,6 @@ "type": "string", "default": "Slash,InvalidUtf8,Dot" }, - "endpoint": { - "description": "Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API.", - "type": "string", - "example": "eos-wuxi-1.cmecloud.cn" - }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", @@ -11983,23 +14850,18 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must match endpoint.", - "type": "string", - "example": "wuxi1" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -12032,15 +14894,15 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -12049,33 +14911,8 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "storageClass": { - "description": "The storage class to use when storing new objects in ChinaMobile.", - "type": "string", - "example": "" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12084,16 +14921,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12104,6 +14966,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -12111,13 +14978,17 @@ } } }, - "storage.s3CloudflareConfig": { + "storage.s3IONOSConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, + "acl": { + "description": "Canned ACL used when creating buckets and storing or copying objects.", + "type": "string" + }, "bucketAcl": { 
"description": "Canned ACL used when creating buckets.", "type": "string", @@ -12138,6 +15009,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -12158,8 +15038,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", - "type": "string" + "description": "Endpoint for IONOS S3 Object Storage.", + "type": "string", + "example": "s3-eu-central-1.ionoscloud.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12193,12 +15074,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -12232,9 +15113,14 @@ "type": "string" }, "region": { - "description": "Region to connect to.", + "description": "Region where your bucket will be created and your data stored.", "type": "string", - "example": "auto" + "example": "de" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -12249,7 +15135,7 @@ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12258,16 +15144,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12278,6 +15189,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -12285,7 +15201,7 @@ } } }, - "storage.s3DigitalOceanConfig": { 
+ "storage.s3LeviiaConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12316,6 +15232,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -12337,8 +15262,7 @@ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string", - "example": "syd1.digitaloceanspaces.com" + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12366,22 +15290,18 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -12419,6 +15339,11 @@ "type": "string", "example": "" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -12432,7 +15357,7 @@ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12441,16 +15366,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12461,6 +15411,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -12468,7 +15423,7 @@ } } }, - 
"storage.s3DreamhostConfig": { + "storage.s3LiaraConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12499,6 +15454,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -12519,9 +15483,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", + "description": "Endpoint for Liara Object Storage API.", "type": "string", - "example": "objects-us-east-1.dream.io" + "example": "storage.iran.liara.space" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12549,22 +15513,18 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -12597,10 +15557,10 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -12614,8 +15574,13 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "storageClass": { + "description": "The storage class to use when storing new objects in Liara", + "type": "string", + "example": "STANDARD" + }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12624,16 +15589,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + 
"default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12644,6 +15634,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -12651,7 +15646,7 @@ } } }, - "storage.s3HuaweiOBSConfig": { + "storage.s3LinodeConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12682,6 +15677,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -12702,9 +15706,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for OBS API.", + "description": "Endpoint for Linode Object Storage API.", "type": "string", - "example": "obs.af-south-1.myhuaweicloud.com" + "example": "us-southeast-1.linodeobjects.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12738,12 +15742,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -12776,10 +15780,10 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to. - the location where your bucket will be created and your data stored. 
Need bo be same with your endpoint.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "af-south-1" + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -12794,7 +15798,7 @@ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12803,16 +15807,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -12823,6 +15852,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -12830,7 +15864,7 @@ } } }, - "storage.s3IBMCOSConfig": { + "storage.s3LyveCloudConfig": { "type": "object", "properties": { "accessKeyId": { @@ -12839,8 +15873,7 @@ }, "acl": { "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string", - "example": "private" + "type": "string" }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", @@ -12862,6 +15895,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -12882,9 +15924,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for IBM COS S3 API.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.us.cloud-object-storage.appdomain.cloud" + "example": "s3.us-east-1.lyvecloud.seagate.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -12913,9 +15955,8 @@ "default": 0 }, "locationConstraint": { - "description": "Location constraint - must match endpoint when using IBM Cloud Public.", - "type": "string", - "example": "us-standard" + "description": "Location constraint - must be set to match the Region.", + "type": "string" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ 
-12923,12 +15964,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -12966,6 +16007,11 @@ "type": "string", "example": "" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -12979,7 +16025,7 @@ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -12988,16 +16034,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13008,6 +16079,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13015,7 +16091,7 @@ } } }, - "storage.s3IDriveConfig": { + "storage.s3MagaluConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13046,6 +16122,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13065,6 +16150,11 @@ "type": "string", "default": "Slash,InvalidUtf8,Dot" }, + "endpoint": { + "description": "Endpoint for S3 API.", + "type": "string", + "example": "br-se1.magaluobjects.com" + }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", "type": "boolean", @@ -13097,12 +16187,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will 
be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -13135,6 +16225,11 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -13147,8 +16242,13 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "storageClass": { + "description": "The storage class to use when storing new objects in Magalu.", + "type": "string", + "example": "STANDARD" + }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -13157,16 +16257,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13177,6 +16302,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13184,7 +16314,7 @@ } } }, - "storage.s3IONOSConfig": { + "storage.s3MinioConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13215,6 +16345,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13235,9 +16374,8 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for IONOS S3 Object Storage.", - "type": "string", - "example": "s3-eu-central-1.ionoscloud.com" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13265,18 +16403,22 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location 
constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -13310,14 +16452,24 @@ "type": "string" }, "region": { - "description": "Region where your bucket will be created and your data stored.", + "description": "Region to connect to.", "type": "string", - "example": "de" + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" }, + "serverSideEncryption": { + "description": "The server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, "sessionToken": { "description": "An AWS session token.", "type": "string" @@ -13326,8 +16478,33 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "sseCustomerAlgorithm": { + "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", + "type": "string", + "example": "" + }, + "sseCustomerKey": { + "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", + "type": "string", + "example": "" + }, + "sseCustomerKeyBase64": { + "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", + "type": "string", + "example": "" + }, + "sseCustomerKeyMd5": { + "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", + "type": "string", + "example": "" + }, + "sseKmsKeyId": { + "description": "If using KMS ID you must provide the ARN of Key.", + "type": "string", + "example": "" + }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -13336,16 +16513,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { 
"description": "If true use v2 authentication.", "type": "boolean", @@ -13356,6 +16558,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13363,7 +16570,7 @@ } } }, - "storage.s3LiaraConfig": { + "storage.s3NeteaseConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13394,6 +16601,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13414,9 +16630,8 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Liara Object Storage API.", - "type": "string", - "example": "storage.iran.liara.space" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13444,18 +16659,22 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -13488,6 +16707,16 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region to connect to.", + "type": "string", + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -13500,13 +16729,8 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in Liara", - "type": "string", - "example": "STANDARD" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -13515,16 +16739,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13535,6 +16784,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13542,7 +16796,7 @@ } } }, - "storage.s3LyveCloudConfig": { + "storage.s3OtherConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13573,6 +16827,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13594,8 +16857,7 @@ }, "endpoint": { "description": "Endpoint for S3 API.", - "type": "string", - "example": "s3.us-east-1.lyvecloud.seagate.com" + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13633,12 +16895,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. 
(no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -13676,6 +16938,11 @@ "type": "string", "example": "" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -13689,7 +16956,7 @@ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -13698,16 +16965,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13718,6 +17010,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -13725,7 +17022,7 @@ } } }, - "storage.s3MinioConfig": { + "storage.s3PetaboxConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13756,6 +17053,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13776,8 +17082,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", - "type": "string" + "description": "Endpoint for Petabox S3 Object Storage.", + "type": "string", + "example": "s3.petabox.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -13805,22 +17112,18 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be 
flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -13854,18 +17157,18 @@ "type": "string" }, "region": { - "description": "Region to connect to.", + "description": "Region where your bucket will be created and your data stored.", "type": "string", - "example": "" + "example": "us-east-1" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", - "type": "string" - }, - "serverSideEncryption": { - "description": "The server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" + "type": "string" }, "sessionToken": { "description": "An AWS session token.", @@ -13875,33 +17178,8 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "sseCustomerAlgorithm": { - "description": "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", - "type": "string", - "example": "" - }, - "sseCustomerKey": { - "description": "To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyBase64": { - "description": "If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.", - "type": "string", - "example": "" - }, - "sseCustomerKeyMd5": { - "description": "If using SSE-C you may provide the secret encryption key MD5 checksum (optional).", - "type": "string", - "example": "" - }, - "sseKmsKeyId": { - "description": "If using KMS ID you must provide the ARN of Key.", - "type": "string", - "example": "" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -13910,16 +17188,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -13930,6 +17233,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { 
"description": "Include old versions in directory listings.", "type": "boolean", @@ -13937,7 +17245,7 @@ } } }, - "storage.s3NeteaseConfig": { + "storage.s3QiniuConfig": { "type": "object", "properties": { "accessKeyId": { @@ -13968,6 +17276,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -13988,8 +17305,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", - "type": "string" + "description": "Endpoint for Qiniu Object Storage.", + "type": "string", + "example": "s3-cn-east-1.qiniucs.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14019,7 +17337,8 @@ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string" + "type": "string", + "example": "cn-east-1" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -14027,12 +17346,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -14068,7 +17387,12 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "" + "example": "cn-east-1" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -14082,8 +17406,13 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "storageClass": { + "description": "The storage class to use when storing new objects in Qiniu.", + "type": "string", + "example": "STANDARD" + }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -14092,16 +17421,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an 
unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14112,6 +17466,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -14119,7 +17478,7 @@ } } }, - "storage.s3OtherConfig": { + "storage.s3RackCorpConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14150,6 +17509,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -14170,8 +17538,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", - "type": "string" + "description": "Endpoint for RackCorp Object Storage.", + "type": "string", + "example": "s3.rackcorp.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14200,8 +17569,9 @@ "default": 0 }, "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" + "description": "Location constraint - the location where your bucket will be located and your data stored.", + "type": "string", + "example": "global" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -14209,12 +17579,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -14248,9 +17618,14 @@ "type": "string" }, "region": { - "description": "Region to connect to.", + "description": "region - the location where your bucket will be created and your data stored.", "type": "string", - "example": "" + "example": "global" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -14265,7 +17640,7 @@ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -14274,16 +17649,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14294,6 +17694,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -14301,7 +17706,7 @@ } } }, - "storage.s3QiniuConfig": { + "storage.s3RcloneConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14332,6 +17737,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -14352,9 +17766,8 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Qiniu Object Storage.", - "type": "string", - "example": "s3-cn-east-1.qiniucs.com" + "description": "Endpoint for S3 API.", + "type": "string" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14384,8 +17797,7 @@ }, "locationConstraint": { "description": "Location constraint - must be set to match the Region.", - "type": "string", - "example": "cn-east-1" + "type": "string" }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", @@ -14393,12 +17805,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": 
"How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -14434,7 +17846,12 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "cn-east-1" + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -14448,13 +17865,8 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in Qiniu.", - "type": "string", - "example": "STANDARD" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -14463,16 +17875,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14483,6 +17920,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -14490,7 +17932,7 @@ } } }, - "storage.s3RackCorpConfig": { + "storage.s3ScalewayConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14521,6 +17963,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -14541,9 +17992,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for RackCorp Object Storage.", + "description": "Endpoint for Scaleway Object Storage.", "type": "string", - "example": "s3.rackcorp.com" + "example": "s3.nl-ams.scw.cloud" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14571,23 +18022,18 @@ "type": 
"integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - the location where your bucket will be located and your data stored.", - "type": "string", - "example": "global" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -14621,9 +18067,14 @@ "type": "string" }, "region": { - "description": "region - the location where your bucket will be created and your data stored.", + "description": "Region to connect to.", "type": "string", - "example": "global" + "example": "nl-ams" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -14637,8 +18088,13 @@ "description": "Path to the shared credentials file.", "type": "string" }, + "storageClass": { + "description": "The storage class to use when storing new objects in S3.", + "type": "string", + "example": "" + }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -14647,16 +18103,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14667,6 +18148,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -14674,7 +18160,7 @@ } } }, - "storage.s3ScalewayConfig": { + "storage.s3SeaweedFSConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14705,6 +18191,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + 
"default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -14725,9 +18220,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Scaleway Object Storage.", + "description": "Endpoint for S3 API.", "type": "string", - "example": "s3.nl-ams.scw.cloud" + "example": "localhost:8333" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14755,18 +18250,22 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -14802,7 +18301,12 @@ "region": { "description": "Region to connect to.", "type": "string", - "example": "nl-ams" + "example": "" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -14816,13 +18320,8 @@ "description": "Path to the shared credentials file.", "type": "string" }, - "storageClass": { - "description": "The storage class to use when storing new objects in S3.", - "type": "string", - "example": "" - }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -14831,16 +18330,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -14851,6 +18375,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -14858,7 +18387,7 @@ } } }, - "storage.s3SeaweedFSConfig": { + 
"storage.s3StackPathConfig": { "type": "object", "properties": { "accessKeyId": { @@ -14889,6 +18418,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -14909,9 +18447,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for S3 API.", + "description": "Endpoint for StackPath Object Storage.", "type": "string", - "example": "localhost:8333" + "example": "s3.us-east-2.stackpathstorage.com" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -14939,22 +18477,18 @@ "type": "integer", "default": 0 }, - "locationConstraint": { - "description": "Location constraint - must be set to match the Region.", - "type": "string" - }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -14992,6 +18526,11 @@ "type": "string", "example": "" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -15005,7 +18544,7 @@ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -15014,16 +18553,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15034,6 +18598,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old 
versions in directory listings.", "type": "boolean", @@ -15041,17 +18610,13 @@ } } }, - "storage.s3StackPathConfig": { + "storage.s3StorjConfig": { "type": "object", "properties": { "accessKeyId": { "description": "AWS Access Key ID.", "type": "string" }, - "acl": { - "description": "Canned ACL used when creating buckets and storing or copying objects.", - "type": "string" - }, "bucketAcl": { "description": "Canned ACL used when creating buckets.", "type": "string", @@ -15072,6 +18637,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -15092,9 +18666,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for StackPath Object Storage.", + "description": "Endpoint for Storj Gateway.", "type": "string", - "example": "s3.us-east-2.stackpathstorage.com" + "example": "gateway.storjshare.io" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15128,12 +18702,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. (no longer used)", "type": "boolean", "default": false }, @@ -15166,10 +18740,10 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, - "region": { - "description": "Region to connect to.", + "sdkLogMode": { + "description": "Set to debug the SDK", "type": "string", - "example": "" + "default": "Off" }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", @@ -15184,7 +18758,7 @@ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -15193,16 +18767,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": 
"boolean", @@ -15213,6 +18812,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -15220,7 +18824,7 @@ } } }, - "storage.s3StorjConfig": { + "storage.s3SynologyConfig": { "type": "object", "properties": { "accessKeyId": { @@ -15247,6 +18851,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -15267,9 +18880,9 @@ "default": "Slash,InvalidUtf8,Dot" }, "endpoint": { - "description": "Endpoint for Storj Gateway.", + "description": "Endpoint for Synology C2 Object Storage API.", "type": "string", - "example": "gateway.storjshare.io" + "example": "eu-001.s3.synologyc2.net" }, "envAuth": { "description": "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).", @@ -15297,18 +18910,22 @@ "type": "integer", "default": 0 }, + "locationConstraint": { + "description": "Location constraint - must be set to match the Region.", + "type": "string" + }, "maxUploadParts": { "description": "Maximum number of parts in a multipart upload.", "type": "integer", "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -15341,6 +18958,16 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "region": { + "description": "Region where your data stored.", + "type": "string", + "example": "eu-001" + }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -15354,7 +18981,7 @@ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -15363,16 +18990,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15383,6 +19035,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -15422,6 +19079,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -15478,12 +19144,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -15516,6 +19182,11 @@ "description": "Profile to use in the shared credentials file.", "type": "string" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -15534,7 +19205,7 @@ "example": "" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -15543,16 +19214,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15563,6 +19259,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -15601,6 +19302,15 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, + "directoryMarkers": { + "description": "Upload an empty object with a trailing slash when a new directory is created", + "type": "boolean", + "default": false + }, "disableChecksum": { "description": "Don't store MD5 checksum with object metadata.", "type": "boolean", @@ -15661,12 +19371,12 @@ "default": 10000 }, "memoryPoolFlushTime": { - "description": "How often internal memory buffer pools will be flushed.", + "description": "How often internal memory buffer pools will be flushed. (no longer used)", "type": "string", "default": "1m0s" }, "memoryPoolUseMmap": { - "description": "Whether to use mmap buffers in internal memory pool.", + "description": "Whether to use mmap buffers in internal memory pool. 
(no longer used)", "type": "boolean", "default": false }, @@ -15704,6 +19414,11 @@ "type": "string", "example": "" }, + "sdkLogMode": { + "description": "Set to debug the SDK", + "type": "string", + "default": "Off" + }, "secretAccessKey": { "description": "AWS Secret Access Key (password).", "type": "string" @@ -15717,7 +19432,7 @@ "type": "string" }, "uploadConcurrency": { - "description": "Concurrency for multipart uploads.", + "description": "Concurrency for multipart uploads and copies.", "type": "integer", "default": 4 }, @@ -15726,16 +19441,41 @@ "type": "string", "default": "200Mi" }, + "useAcceptEncodingGzip": { + "description": "Whether to send `Accept-Encoding: gzip` header.", + "type": "string", + "default": "unset" + }, + "useAlreadyExists": { + "description": "Set if rclone should report BucketAlreadyExists errors on bucket creation.", + "type": "string", + "default": "unset" + }, + "useDualStack": { + "description": "If true use AWS S3 dual-stack endpoint (IPv6 support).", + "type": "boolean", + "default": false + }, "useMultipartEtag": { "description": "Whether to use ETag in multipart uploads for verification", "type": "string", "default": "unset" }, + "useMultipartUploads": { + "description": "Set if rclone should use multipart uploads.", + "type": "string", + "default": "unset" + }, "usePresignedRequest": { "description": "Whether to use a presigned request or PutObject for single part uploads", "type": "boolean", "default": false }, + "useUnsignedPayload": { + "description": "Whether to use an unsigned payload in PutObject", + "type": "string", + "default": "unset" + }, "v2Auth": { "description": "If true use v2 authentication.", "type": "boolean", @@ -15746,6 +19486,11 @@ "type": "string", "default": "off" }, + "versionDeleted": { + "description": "Show deleted file markers when using versions.", + "type": "boolean", + "default": false + }, "versions": { "description": "Include old versions in directory listings.", "type": "boolean", @@ -15770,6 +19515,10 @@ "type": "boolean", "default": false }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -15820,6 +19569,20 @@ "type": "integer", "default": 64 }, + "connections": { + "description": "Maximum number of SFTP simultaneous connections, 0 for unlimited.", + "type": "integer", + "default": 0 + }, + "copyIsHardlink": { + "description": "Set to enable server side copies using hardlinks.", + "type": "boolean", + "default": false + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "disableConcurrentReads": { "description": "If set don't use concurrent reads.", "type": "boolean", @@ -15839,6 +19602,10 @@ "description": "SSH host to connect to.", "type": "string" }, + "hostKeyAlgorithms": { + "description": "Space separated list of host key algorithms, ordered by preference.", + "type": "string" + }, "idleTimeout": { "description": "Max time before closing idle connections.", "type": "string", @@ -15922,6 +19689,14 @@ "type": "boolean", "default": false }, + "socksProxy": { + "description": "Socks 5 proxy host.", + "type": "string" + }, + "ssh": { + "description": "Path and arguments to external ssh binary.", + "type": "string" + }, "subsystem": { "description": "Specifies the SSH2 subsystem on the remote host.", "type": "string", @@ -15948,11 +19723,27 @@ "storage.sharefileConfig": { "type": "object", "properties": { + "authUrl": { + "description": "Auth server 
URL.", + "type": "string" + }, "chunkSize": { "description": "Upload chunk size.", "type": "string", "default": "64Mi" }, + "clientId": { + "description": "OAuth Client Id.", + "type": "string" + }, + "clientSecret": { + "description": "OAuth Client Secret.", + "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -15967,6 +19758,14 @@ "type": "string", "example": "" }, + "token": { + "description": "OAuth Access Token as a JSON blob.", + "type": "string" + }, + "tokenUrl": { + "description": "Token server url.", + "type": "string" + }, "uploadCutoff": { "description": "Cutoff for switching to multipart upload.", "type": "string", @@ -15986,6 +19785,10 @@ "type": "string", "default": "http://127.0.0.1:9980" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -16006,6 +19809,10 @@ "type": "boolean", "default": true }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "domain": { "description": "Domain name for NTLM authentication.", "type": "string", @@ -16056,6 +19863,10 @@ "accessGrant": { "description": "Access grant.", "type": "string" + }, + "description": { + "description": "Description of the remote.", + "type": "string" } } }, @@ -16066,6 +19877,10 @@ "description": "API key.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "passphrase": { "description": "Encryption passphrase.", "type": "string" @@ -16101,6 +19916,10 @@ "description": "Sugarsync deleted folder id.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -16159,10 +19978,14 @@ "default": 0 }, "chunkSize": { - "description": "Above this size files will be chunked into a _segments container.", + "description": "Above this size files will be chunked.", "type": "string", "default": "5Gi" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "domain": { "description": "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)", "type": "string" @@ -16184,6 +20007,11 @@ "default": false, "example": false }, + "fetchUntilEmptyPage": { + "description": "When paginating, always fetch unless we received an empty page.", + "type": "boolean", + "default": false + }, "key": { "description": "API key or password (OS_PASSWORD).", "type": "string" @@ -16203,6 +20031,11 @@ "type": "boolean", "default": false }, + "partialPageFetchThreshold": { + "description": "When paginating, fetch if the current page is within this percentage of the limit.", + "type": "integer", + "default": 0 + }, "region": { "description": "Region name - optional (OS_REGION_NAME).", "type": "string" @@ -16228,6 +20061,11 @@ "description": "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).", "type": "string" }, + "useSegmentsContainer": { + "description": "Choose destination for large object segments", + "type": "string", + "default": "unset" + }, "user": { "description": "User name to log in (OS_USERNAME).", "type": "string" @@ -16256,6 +20094,10 @@ "type": "string", "default": "epmfs" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "minFreeSpace": { 
"description": "Minimum viable free space for lfs/eplfs policies.", "type": "string", @@ -16279,10 +20121,19 @@ "description": "Your access token.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", "default": "Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot" + }, + "private": { + "description": "Set to make uploaded files private", + "type": "boolean", + "default": false } } }, @@ -16297,6 +20148,10 @@ "description": "Command to run to get a bearer token.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string" @@ -16305,10 +20160,34 @@ "description": "Set HTTP headers for all transactions.", "type": "string" }, + "nextcloudChunkSize": { + "description": "Nextcloud upload chunk size.", + "type": "string", + "default": "10Mi" + }, + "owncloudExcludeMounts": { + "description": "Exclude ownCloud mounted storages", + "type": "boolean", + "default": false + }, + "owncloudExcludeShares": { + "description": "Exclude ownCloud shares", + "type": "boolean", + "default": false + }, + "pacerMinSleep": { + "description": "Minimum time to sleep between API calls.", + "type": "string", + "default": "10ms" + }, "pass": { "description": "Password.", "type": "string" }, + "unixSocket": { + "description": "Path to a unix domain socket to dial to, instead of opening a TCP connection directly", + "type": "string" + }, "url": { "description": "URL of http host to connect to.", "type": "string" @@ -16320,7 +20199,7 @@ "vendor": { "description": "Name of the WebDAV site/service/software you are using.", "type": "string", - "example": "nextcloud" + "example": "fastmail" } } }, @@ -16339,6 +20218,10 @@ "description": "OAuth Client Secret.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", @@ -16349,6 +20232,11 @@ "type": "boolean", "default": false }, + "spoofUa": { + "description": "Set the user agent to match an official version of the yandex disk client. May help with upload performance.", + "type": "boolean", + "default": true + }, "token": { "description": "OAuth Access Token as a JSON blob.", "type": "string" @@ -16374,6 +20262,10 @@ "description": "OAuth Client Secret.", "type": "string" }, + "description": { + "description": "Description of the remote.", + "type": "string" + }, "encoding": { "description": "The encoding for the backend.", "type": "string", diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index 490d4d59..60d90062 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -78,6 +78,15 @@ definitions: required: - name type: object + dataprep.DeletePieceRequest: + properties: + deleteCar: + description: 'Delete the physical CAR file from storage (default: true)' + type: boolean + force: + description: Delete even if deals reference this piece + type: boolean + type: object dataprep.DirEntry: properties: cid: @@ -276,6 +285,11 @@ definitions: type: integer jobId: type: integer + minPieceSizePadding: + description: 'MinPieceSizePadding tracks virtual padding for inline mode only. + Inline: stores padding amount, PieceReader serves zeros virtually. 
Non-inline: + always 0, literal zeros are written to CAR file for Curio TreeD compatibility.' + type: integer numOfFiles: type: integer pieceCid: @@ -286,7 +300,7 @@ definitions: description: PieceType indicates whether this is a data piece or DAG piece type: string preparationId: - description: Association + description: Association - SET NULL for fast prep deletion, async cleanup type: integer rootCid: type: string @@ -437,7 +451,8 @@ definitions: model.File: properties: attachmentId: - description: Associations + description: Associations - AttachmentID SET NULL for fast prep deletion, + async cleanup type: integer cid: description: CID is the CID of the file. @@ -496,7 +511,8 @@ definitions: type: $ref: '#/definitions/model.JobType' workerId: - description: Associations + description: Associations - AttachmentID SET NULL for fast prep deletion, + async cleanup type: string type: object model.JobState: @@ -870,44 +886,10 @@ definitions: required: - name type: object - storage.acdConfig: - properties: - authUrl: - description: Auth server URL. - type: string - checkpoint: - description: Checkpoint for internal polling (debug). - type: string - clientId: - description: OAuth Client Id. - type: string - clientSecret: - description: OAuth Client Secret. - type: string - encoding: - default: Slash,InvalidUtf8,Dot - description: The encoding for the backend. - type: string - templinkThreshold: - default: 9Gi - description: Files >= this size will be downloaded via their tempLink. - type: string - token: - description: OAuth Access Token as a JSON blob. - type: string - tokenUrl: - description: Token server url. - type: string - uploadWaitPerGb: - default: 3m0s - description: Additional time per GiB to wait after a failed complete upload - to see if it appears. - type: string - type: object storage.azureblobConfig: properties: accessTier: - description: 'Access tier of blob: hot, cool or archive.' + description: 'Access tier of blob: hot, cool, cold or archive.' type: string account: description: Azure Storage Account Name. @@ -937,6 +919,18 @@ definitions: default: false description: Send the certificate chain when using certificate auth. type: boolean + deleteSnapshots: + description: Set to specify how to deal with snapshots on blob deletion. + example: "" + type: string + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -962,11 +956,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean msiClientId: description: Object ID of the user-assigned MSI to use, if any. @@ -1036,14 +1032,17 @@ definitions: default: 4Gi description: Cutoff for switching to multipart copy. type: string + description: + description: Description of the remote. + type: string disableChecksum: default: false description: Disable checksums for large (> upload cutoff) files. 
type: boolean downloadAuthDuration: default: 1w - description: Time before the authorization token will expire in s or suffix - ms|s|m|h|d. + description: Time before the public link authorization token will expire in + s or suffix ms|s|m|h|d. type: string downloadUrl: description: Custom endpoint for downloads. @@ -1062,17 +1061,28 @@ definitions: key: description: Application Key. type: string + lifecycle: + default: 0 + description: Set the number of days deleted files should be kept when creating + a bucket. + type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean testMode: description: A flag string for X-Bz-Test-Mode header for debugging. type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads. + type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. @@ -1111,10 +1121,16 @@ definitions: default: 100 description: Max number of times to try committing a multipart file. type: integer + description: + description: Description of the remote. + type: string encoding: default: Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot description: The encoding for the backend. type: string + impersonate: + description: Impersonate this user ID when using a service account. + type: string listChunk: default: 1000 description: Size of listing chunk 1-1000. @@ -1137,24 +1153,6 @@ definitions: description: Cutoff for switching to multipart upload (>= 50 MiB). 
type: string type: object - storage.createAcdStorageRequest: - properties: - clientConfig: - allOf: - - $ref: '#/definitions/model.ClientConfig' - description: config for underlying HTTP client - config: - allOf: - - $ref: '#/definitions/storage.acdConfig' - description: config for the storage - name: - description: Name of the storage, must be unique - example: my-storage - type: string - path: - description: Path of the storage - type: string - type: object storage.createAzureblobStorageRequest: properties: clientConfig: @@ -1659,6 +1657,24 @@ definitions: description: Path of the storage type: string type: object + storage.createOosWorkload_identity_authStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.oosWorkload_identity_authConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createOpendriveStorageRequest: properties: clientConfig: @@ -1893,6 +1909,24 @@ definitions: description: Path of the storage type: string type: object + storage.createS3GCSStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3GCSConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3HuaweiOBSStorageRequest: properties: clientConfig: @@ -1965,6 +1999,24 @@ definitions: description: Path of the storage type: string type: object + storage.createS3LeviiaStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3LeviiaConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3LiaraStorageRequest: properties: clientConfig: @@ -1983,6 +2035,24 @@ definitions: description: Path of the storage type: string type: object + storage.createS3LinodeStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3LinodeConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3LyveCloudStorageRequest: properties: clientConfig: @@ -2001,6 +2071,24 @@ definitions: description: Path of the storage type: string type: object + storage.createS3MagaluStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3MagaluConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object 
storage.createS3MinioStorageRequest: properties: clientConfig: @@ -2055,6 +2143,24 @@ definitions: description: Path of the storage type: string type: object + storage.createS3PetaboxStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3PetaboxConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3QiniuStorageRequest: properties: clientConfig: @@ -2091,6 +2197,24 @@ definitions: description: Path of the storage type: string type: object + storage.createS3RcloneStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3RcloneConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3ScalewayStorageRequest: properties: clientConfig: @@ -2163,6 +2287,24 @@ definitions: description: Path of the storage type: string type: object + storage.createS3SynologyStorageRequest: + properties: + clientConfig: + allOf: + - $ref: '#/definitions/model.ClientConfig' + description: config for underlying HTTP client + config: + allOf: + - $ref: '#/definitions/storage.s3SynologyConfig' + description: config for the storage + name: + description: Name of the storage, must be unique + example: my-storage + type: string + path: + description: Path of the storage + type: string + type: object storage.createS3TencentCOSStorageRequest: properties: clientConfig: @@ -2487,6 +2629,9 @@ definitions: default: false description: Server side copy contents of shortcuts instead of the shortcut. type: boolean + description: + description: Description of the remote. + type: string disableHttp2: default: true description: Disable drive using http2. @@ -2495,11 +2640,21 @@ definitions: default: InvalidUtf8 description: The encoding for the backend. type: string + envAuth: + default: false + description: Get IAM credentials from runtime (environment variables or instance + meta data if no env vars). + example: false + type: boolean exportFormats: default: docx,xlsx,pptx,svg description: Comma separated list of preferred formats for downloading Google docs. type: string + fastListBugFix: + default: true + description: Work around a bug in Google Drive listing. + type: boolean formats: description: 'Deprecated: See export_formats.' type: string @@ -2518,6 +2673,21 @@ definitions: default: 1000 description: Size of listing chunk 100-1000, 0 to disable. type: integer + metadataLabels: + default: "off" + description: Control whether labels should be read or written in metadata. + example: "off" + type: string + metadataOwner: + default: read + description: Control whether owner should be read or written in metadata. + example: "off" + type: string + metadataPermissions: + default: "off" + description: Control whether permissions should be read or written in metadata. + example: "off" + type: string pacerBurst: default: 100 description: Number of API calls to allow without sleeping. @@ -2533,13 +2703,13 @@ definitions: description: ID of the root folder. 
type: string scope: - description: Scope that rclone should use when requesting access from drive. + description: Comma separated list of scopes that rclone should use when requesting + access from drive. example: drive type: string serverSideAcrossConfigs: default: false - description: Allow server-side operations (e.g. copy) to work across different - drive configs. + description: 'Deprecated: use --server-side-across-configs instead.' type: boolean serviceAccountCredentials: description: Service Account Credentials JSON blob. @@ -2551,13 +2721,17 @@ definitions: default: false description: Only show files that are shared with me. type: boolean + showAllGdocs: + default: false + description: Show all Google Docs including non-exportable ones in listings. + type: boolean sizeAsQuota: default: false description: Show sizes as storage quota usage, not actual size. type: boolean skipChecksumGphotos: default: false - description: Skip MD5 checksum on Google photos and videos only. + description: Skip checksums on Google photos and videos only. type: boolean skipDanglingShortcuts: default: false @@ -2648,6 +2822,9 @@ definitions: clientSecret: description: OAuth Client Secret. type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot description: The encoding for the backend. @@ -2655,6 +2832,14 @@ definitions: impersonate: description: Impersonate this user when using a business account. type: string + pacerMinSleep: + default: 10ms + description: Minimum time to sleep between API calls. + type: string + rootNamespace: + description: Specify a different Dropbox namespace ID to use as the root for + all paths. + type: string sharedFiles: default: false description: Instructs rclone to work on individual shared files. @@ -2675,6 +2860,13 @@ definitions: apiKey: description: Your API Key, get it from https://1fichier.com/console/params.pl. type: string + cdn: + default: false + description: Set if you wish to use CDN download links. + type: boolean + description: + description: Description of the remote. + type: string encoding: default: Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot description: The encoding for the backend. @@ -2693,6 +2885,9 @@ definitions: type: object storage.filefabricConfig: properties: + description: + description: Description of the remote. + type: string encoding: default: Slash,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. @@ -2731,6 +2926,9 @@ definitions: default: 0 description: Maximum number of FTP simultaneous connections, 0 for unlimited. type: integer + description: + description: Description of the remote. + type: string disableEpsv: default: false description: Disable using EPSV even if server advertises support. @@ -2783,6 +2981,9 @@ definitions: default: 1m0s description: Maximum time to wait for data connection closing status. type: string + socksProxy: + description: Socks 5 proxy host. + type: string tls: default: false description: Use Implicit FTPS (FTP over TLS). @@ -2827,6 +3028,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. 
+ type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean encoding: default: Slash,CrLf,InvalidUtf8,Dot description: The encoding for the backend. @@ -2872,18 +3081,40 @@ definitions: tokenUrl: description: Token server url. type: string + userProject: + description: User project. + type: string type: object storage.gphotosConfig: properties: authUrl: description: Auth server URL. type: string + batchCommitTimeout: + default: 10m0s + description: Max time to wait for a batch to finish committing + type: string + batchMode: + default: sync + description: Upload file batching sync|async|off. + type: string + batchSize: + default: 0 + description: Max number of files in upload batch. + type: integer + batchTimeout: + default: 0s + description: Max time to allow an idle upload batch before uploading. + type: string clientId: description: OAuth Client Id. type: string clientSecret: description: OAuth Client Secret. type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,CrLf,InvalidUtf8,Dot description: The encoding for the backend. @@ -2918,12 +3149,15 @@ definitions: description: 'Kerberos data transfer protection: authentication|integrity|privacy.' example: privacy type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,Colon,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. type: string namenode: - description: Hadoop name node and port. + description: Hadoop name nodes and ports. type: string servicePrincipalName: description: Kerberos service principal name for the namenode. @@ -2948,6 +3182,9 @@ definitions: clientSecret: description: OAuth Client Secret. type: string + description: + description: Description of the remote. + type: string disableFetchingMemberCount: default: false description: Do not fetch number of objects in directories unless it is absolutely @@ -2995,9 +3232,16 @@ definitions: type: object storage.httpConfig: properties: + description: + description: Description of the remote. + type: string headers: description: Set HTTP headers for all transactions. type: string + noEscape: + default: false + description: Do not escape URL metacharacters in path names. + type: boolean noHead: default: false description: Don't use HEAD requests. @@ -3015,6 +3259,9 @@ definitions: accessKeyId: description: IAS3 Access Key. type: string + description: + description: Description of the remote. + type: string disableChecksum: default: true description: Don't ask the server to test against MD5 checksum calculated @@ -3043,6 +3290,18 @@ definitions: type: object storage.jottacloudConfig: properties: + authUrl: + description: Auth server URL. + type: string + clientId: + description: OAuth Client Id. + type: string + clientSecret: + description: OAuth Client Secret. + type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. @@ -3061,8 +3320,14 @@ definitions: description: Avoid server side versioning by deleting files and recreating files instead of overwriting them. type: boolean - trashedOnly: - default: false + token: + description: OAuth Access Token as a JSON blob. + type: string + tokenUrl: + description: Token server url. 
+ type: string + trashedOnly: + default: false description: Only show files that are in the trash. type: boolean uploadResumeLimit: @@ -3072,6 +3337,9 @@ definitions: type: object storage.koofrDigistorageConfig: properties: + description: + description: Description of the remote. + type: string encoding: default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. @@ -3080,7 +3348,7 @@ definitions: description: Mount ID of the mount to use. type: string password: - description: Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password). + description: Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password. type: string setmtime: default: true @@ -3092,6 +3360,9 @@ definitions: type: object storage.koofrKoofrConfig: properties: + description: + description: Description of the remote. + type: string encoding: default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. @@ -3100,7 +3371,7 @@ definitions: description: Mount ID of the mount to use. type: string password: - description: Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password). + description: Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password. type: string setmtime: default: true @@ -3112,6 +3383,9 @@ definitions: type: object storage.koofrOtherConfig: properties: + description: + description: Description of the remote. + type: string encoding: default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. @@ -3148,6 +3422,9 @@ definitions: default: false description: Follow symlinks and copy the pointed to item. type: boolean + description: + description: Description of the remote. + type: string encoding: default: Slash,Dot description: The encoding for the backend. @@ -3161,6 +3438,10 @@ definitions: default: false description: Don't check to see if the files change during upload. type: boolean + noClone: + default: false + description: Disable reflink cloning for server-side copies. + type: boolean noPreallocate: default: false description: Disable preallocation of disk space for transferred files. @@ -3186,6 +3467,11 @@ definitions: default: false description: Don't warn about skipped symlinks. type: boolean + timeType: + default: mtime + description: Set what kind of time is returned. + example: mtime + type: string unicodeNormalization: default: false description: Apply unicode NFC normalization to paths and filenames. @@ -3198,11 +3484,23 @@ definitions: type: object storage.mailruConfig: properties: + authUrl: + description: Auth server URL. + type: string checkHash: default: true description: What should copy do if file checksum is mismatched or invalid. example: true type: boolean + clientId: + description: OAuth Client Id. + type: string + clientSecret: + description: OAuth Client Secret. + type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. @@ -3236,6 +3534,12 @@ definitions: on disk. example: "0" type: string + token: + description: OAuth Access Token as a JSON blob. + type: string + tokenUrl: + description: Token server url. + type: string user: description: User name (usually email). 
type: string @@ -3249,6 +3553,9 @@ definitions: default: false description: Output more debug from Mega. type: boolean + description: + description: Description of the remote. + type: string encoding: default: Slash,InvalidUtf8,Dot description: The encoding for the backend. @@ -3273,6 +3580,9 @@ definitions: account: description: Set the NetStorage account name type: string + description: + description: Description of the remote. + type: string host: description: Domain+path of NetStorage host to connect to. type: string @@ -3297,6 +3607,10 @@ definitions: authUrl: description: Auth server URL. type: string + avOverride: + default: false + description: Allows download of files the server thinks has a virus. + type: boolean chunkSize: default: 10Mi description: Chunk size to upload files with - must be multiple of 320k (327,680 @@ -3308,6 +3622,13 @@ definitions: clientSecret: description: OAuth Client Secret. type: string + delta: + default: false + description: If set rclone will use delta listing to implement recursive listings. + type: boolean + description: + description: Description of the remote. + type: string disableSitePermission: default: false description: Disable the request for Sites.Read.All permission. @@ -3326,6 +3647,10 @@ definitions: default: false description: Set to make OneNote files show up in directory listings. type: boolean + hardDelete: + default: false + description: Permanently delete files on removal. + type: boolean hashType: default: auto description: Specify the hash in use for the backend. @@ -3348,6 +3673,11 @@ definitions: default: 1000 description: Size of listing chunk. type: integer + metadataPermissions: + default: "off" + description: Control whether permissions should be read or written in metadata. + example: "off" + type: string noVersions: default: false description: Remove all versions on modifying operations. @@ -3362,8 +3692,7 @@ definitions: type: string serverSideAcrossConfigs: default: false - description: Allow server-side operations (e.g. copy) to work across different - onedrive configs. + description: 'Deprecated: use --server-side-across-configs instead.' type: boolean token: description: OAuth Access Token as a JSON blob. @@ -3374,6 +3703,11 @@ definitions: type: object storage.oosEnv_authConfig: properties: + attemptResumeUpload: + default: false + description: If true attempt to resume previously started multipart upload + for the object. + type: boolean chunkSize: default: 5Mi description: Chunk size to use for uploading. @@ -3389,6 +3723,9 @@ definitions: default: 1m0s description: Timeout for copy. type: string + description: + description: Description of the remote. + type: string disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -3403,8 +3740,12 @@ definitions: leavePartsOnError: default: false description: If true avoid calling abort upload on a failure, leaving all - successfully uploaded parts on S3 for manual recovery. + successfully uploaded parts for manual recovery. type: boolean + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. 
+ type: integer namespace: description: Object storage namespace type: string @@ -3436,7 +3777,7 @@ definitions: example: "" type: string sseKmsKeyId: - description: if using using your own master key in vault, this header specifies + description: if using your own master key in vault, this header specifies the example: "" type: string @@ -3457,6 +3798,11 @@ definitions: type: object storage.oosInstance_principal_authConfig: properties: + attemptResumeUpload: + default: false + description: If true attempt to resume previously started multipart upload + for the object. + type: boolean chunkSize: default: 5Mi description: Chunk size to use for uploading. @@ -3472,6 +3818,9 @@ definitions: default: 1m0s description: Timeout for copy. type: string + description: + description: Description of the remote. + type: string disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -3486,8 +3835,12 @@ definitions: leavePartsOnError: default: false description: If true avoid calling abort upload on a failure, leaving all - successfully uploaded parts on S3 for manual recovery. + successfully uploaded parts for manual recovery. type: boolean + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer namespace: description: Object storage namespace type: string @@ -3519,7 +3872,7 @@ definitions: example: "" type: string sseKmsKeyId: - description: if using using your own master key in vault, this header specifies + description: if using your own master key in vault, this header specifies the example: "" type: string @@ -3540,6 +3893,11 @@ definitions: type: object storage.oosNo_authConfig: properties: + attemptResumeUpload: + default: false + description: If true attempt to resume previously started multipart upload + for the object. + type: boolean chunkSize: default: 5Mi description: Chunk size to use for uploading. @@ -3552,6 +3910,9 @@ definitions: default: 1m0s description: Timeout for copy. type: string + description: + description: Description of the remote. + type: string disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -3566,8 +3927,12 @@ definitions: leavePartsOnError: default: false description: If true avoid calling abort upload on a failure, leaving all - successfully uploaded parts on S3 for manual recovery. + successfully uploaded parts for manual recovery. type: boolean + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer namespace: description: Object storage namespace type: string @@ -3599,7 +3964,7 @@ definitions: example: "" type: string sseKmsKeyId: - description: if using using your own master key in vault, this header specifies + description: if using your own master key in vault, this header specifies the example: "" type: string @@ -3620,6 +3985,11 @@ definitions: type: object storage.oosResource_principal_authConfig: properties: + attemptResumeUpload: + default: false + description: If true attempt to resume previously started multipart upload + for the object. + type: boolean chunkSize: default: 5Mi description: Chunk size to use for uploading. @@ -3635,6 +4005,9 @@ definitions: default: 1m0s description: Timeout for copy. type: string + description: + description: Description of the remote. + type: string disableChecksum: default: false description: Don't store MD5 checksum with object metadata. 
@@ -3649,8 +4022,12 @@ definitions: leavePartsOnError: default: false description: If true avoid calling abort upload on a failure, leaving all - successfully uploaded parts on S3 for manual recovery. + successfully uploaded parts for manual recovery. type: boolean + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer namespace: description: Object storage namespace type: string @@ -3682,7 +4059,7 @@ definitions: example: "" type: string sseKmsKeyId: - description: if using using your own master key in vault, this header specifies + description: if using your own master key in vault, this header specifies the example: "" type: string @@ -3703,6 +4080,11 @@ definitions: type: object storage.oosUser_principal_authConfig: properties: + attemptResumeUpload: + default: false + description: If true attempt to resume previously started multipart upload + for the object. + type: boolean chunkSize: default: 5Mi description: Chunk size to use for uploading. @@ -3728,6 +4110,9 @@ definitions: default: 1m0s description: Timeout for copy. type: string + description: + description: Description of the remote. + type: string disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -3742,8 +4127,107 @@ definitions: leavePartsOnError: default: false description: If true avoid calling abort upload on a failure, leaving all - successfully uploaded parts on S3 for manual recovery. + successfully uploaded parts for manual recovery. + type: boolean + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + namespace: + description: Object storage namespace + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + region: + description: Object storage Region + type: string + sseCustomerAlgorithm: + description: If using SSE-C, the optional header that specifies "AES256" as + the encryption algorithm. + example: "" + type: string + sseCustomerKey: + description: To use SSE-C, the optional header that specifies the base64-encoded + 256-bit encryption key to use to + example: "" + type: string + sseCustomerKeyFile: + description: To use SSE-C, a file containing the base64-encoded string of + the AES-256 encryption key associated + example: "" + type: string + sseCustomerKeySha256: + description: If using SSE-C, The optional header that specifies the base64-encoded + SHA256 hash of the encryption + example: "" + type: string + sseKmsKeyId: + description: if using your own master key in vault, this header specifies + the + example: "" + type: string + storageTier: + default: Standard + description: The storage class to use when storing new objects in storage. + https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm + example: Standard + type: string + uploadConcurrency: + default: 10 + description: Concurrency for multipart uploads. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + type: object + storage.oosWorkload_identity_authConfig: + properties: + attemptResumeUpload: + default: false + description: If true attempt to resume previously started multipart upload + for the object. + type: boolean + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. 
+ type: string + compartment: + description: Object storage compartment OCID + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + copyTimeout: + default: 1m0s + description: Timeout for copy. + type: string + description: + description: Description of the remote. + type: string + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for Object storage API. + type: string + leavePartsOnError: + default: false + description: If true avoid calling abort upload on a failure, leaving all + successfully uploaded parts for manual recovery. type: boolean + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer namespace: description: Object storage namespace type: string @@ -3775,7 +4259,7 @@ definitions: example: "" type: string sseKmsKeyId: - description: if using using your own master key in vault, this header specifies + description: if using your own master key in vault, this header specifies the example: "" type: string @@ -3800,6 +4284,9 @@ definitions: default: 10Mi description: Files will be uploaded in chunks this size. type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot description: The encoding for the backend. @@ -3822,6 +4309,9 @@ definitions: clientSecret: description: OAuth Client Secret. type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. @@ -3853,17 +4343,53 @@ definitions: apiKey: description: API Key. type: string + authUrl: + description: Auth server URL. + type: string + clientId: + description: OAuth Client Id. + type: string + clientSecret: + description: OAuth Client Secret. + type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. type: string + token: + description: OAuth Access Token as a JSON blob. + type: string + tokenUrl: + description: Token server url. + type: string type: object storage.putioConfig: properties: + authUrl: + description: Auth server URL. + type: string + clientId: + description: OAuth Client Id. + type: string + clientSecret: + description: OAuth Client Secret. + type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. type: string + token: + description: OAuth Access Token as a JSON blob. + type: string + tokenUrl: + description: Token server url. + type: string type: object storage.qingstorConfig: properties: @@ -3878,6 +4404,9 @@ definitions: default: 3 description: Number of connection retries. type: integer + description: + description: Description of the remote. + type: string encoding: default: Slash,Ctl,InvalidUtf8 description: The encoding for the backend. @@ -3931,6 +4460,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. 
+ type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -3986,11 +4523,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -4023,6 +4562,10 @@ definitions: default: false description: Enables requester pays option when interacting with S3 bucket. type: boolean + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -4066,11 +4609,11 @@ definitions: example: "" type: string stsEndpoint: - description: Endpoint for STS. + description: Endpoint for STS (deprecated). type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi @@ -4080,15 +4623,36 @@ definitions: default: false description: If true use the AWS S3 accelerated endpoint. type: boolean + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -4097,6 +4661,10 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. @@ -4127,6 +4695,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -4174,11 +4750,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. 
(no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -4203,6 +4781,10 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -4218,21 +4800,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -4241,6 +4844,10 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. @@ -4271,6 +4878,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -4288,7 +4903,7 @@ definitions: type: string endpoint: description: Endpoint for Arvan Cloud Object Storage (AOS) API. - example: s3.ir-thr-at1.arvanstorage.com + example: s3.ir-thr-at1.arvanstorage.ir type: string envAuth: default: false @@ -4322,11 +4937,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -4351,6 +4968,10 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). 
type: string @@ -4366,21 +4987,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -4389,6 +5031,10 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. @@ -4419,6 +5065,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -4468,11 +5122,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -4501,6 +5157,10 @@ definitions: description: Region to connect to. example: "" type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -4541,21 +5201,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -4564,6 +5245,10 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. @@ -4594,6 +5279,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -4646,11 +5339,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -4675,6 +5370,10 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -4715,21 +5414,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -4738,6 +5458,10 @@ definitions: default: "off" description: Show file versions as they were at the specified time. 
type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. @@ -4764,6 +5488,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -4810,11 +5542,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -4843,6 +5577,10 @@ definitions: description: Region to connect to. example: auto type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -4854,21 +5592,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -4877,6 +5636,10 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. @@ -4907,6 +5670,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -4957,11 +5728,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. 
+ description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -4990,6 +5763,10 @@ definitions: description: Region to connect to. example: "" type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -5001,21 +5778,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5024,6 +5822,10 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. @@ -5054,6 +5856,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -5104,11 +5914,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -5137,6 +5949,10 @@ definitions: description: Region to connect to. example: "" type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -5148,21 +5964,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. 
type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5171,12 +6008,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3HuaweiOBSConfig: + storage.s3GCSConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -5201,6 +6042,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -5217,8 +6066,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for OBS API. - example: obs.af-south-1.myhuaweicloud.com + description: Endpoint for Google Cloud Storage. + example: https://storage.googleapis.com type: string envAuth: default: false @@ -5242,17 +6091,22 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -5278,9 +6132,12 @@ definitions: description: Profile to use in the shared credentials file. type: string region: - description: Region to connect to. - the location where your bucket will be - created and your data stored. Need bo be same with your endpoint. - example: af-south-1 + description: Region to connect to. + example: "" + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string secretAccessKey: description: AWS Secret Access Key (password). 
@@ -5293,21 +6150,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5316,12 +6194,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3IBMCOSConfig: + storage.s3HuaweiOBSConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -5329,7 +6211,6 @@ definitions: acl: description: Canned ACL used when creating buckets and storing or copying objects. - example: private type: string bucketAcl: description: Canned ACL used when creating buckets. @@ -5347,6 +6228,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -5363,8 +6252,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for IBM COS S3 API. - example: s3.us.cloud-object-storage.appdomain.cloud + description: Endpoint for OBS API. + example: obs.af-south-1.myhuaweicloud.com type: string envAuth: default: false @@ -5388,22 +6277,19 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must match endpoint when using IBM Cloud - Public. - example: us-standard - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. 
(no longer + used) type: boolean mightGzip: default: unset @@ -5429,8 +6315,13 @@ definitions: description: Profile to use in the shared credentials file. type: string region: - description: Region to connect to. - example: "" + description: Region to connect to. - the location where your bucket will be + created and your data stored. Need bo be same with your endpoint. + example: af-south-1 + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string secretAccessKey: description: AWS Secret Access Key (password). @@ -5443,21 +6334,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5466,12 +6378,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3IDriveConfig: + storage.s3IBMCOSConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -5479,6 +6395,7 @@ definitions: acl: description: Canned ACL used when creating buckets and storing or copying objects. + example: private type: string bucketAcl: description: Canned ACL used when creating buckets. @@ -5496,6 +6413,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -5511,6 +6436,10 @@ definitions: default: Slash,InvalidUtf8,Dot description: The encoding for the backend. type: string + endpoint: + description: Endpoint for IBM COS S3 API. + example: s3.us.cloud-object-storage.appdomain.cloud + type: string envAuth: default: false description: Get AWS credentials from runtime (environment variables or EC2/ECS @@ -5533,17 +6462,24 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer + locationConstraint: + description: Location constraint - must match endpoint when using IBM Cloud + Public. 
+ example: us-standard + type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -5568,6 +6504,14 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + region: + description: Region to connect to. + example: "" + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -5579,21 +6523,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5602,12 +6567,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3IONOSConfig: + storage.s3IDriveConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -5632,6 +6601,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -5647,10 +6624,6 @@ definitions: default: Slash,InvalidUtf8,Dot description: The encoding for the backend. type: string - endpoint: - description: Endpoint for IONOS S3 Object Storage. 
- example: s3-eu-central-1.ionoscloud.com - type: string envAuth: default: false description: Get AWS credentials from runtime (environment variables or EC2/ECS @@ -5679,11 +6652,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -5708,9 +6683,9 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string - region: - description: Region where your bucket will be created and your data stored. - example: de + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string secretAccessKey: description: AWS Secret Access Key (password). @@ -5723,21 +6698,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5746,12 +6742,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3LiaraConfig: + storage.s3IONOSConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -5776,6 +6776,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -5792,8 +6800,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Liara Object Storage API. - example: storage.iran.liara.space + description: Endpoint for IONOS S3 Object Storage. 
+ example: s3-eu-central-1.ionoscloud.com type: string envAuth: default: false @@ -5823,11 +6831,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -5852,6 +6862,14 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + region: + description: Region where your bucket will be created and your data stored. + example: de + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -5861,27 +6879,44 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string - storageClass: - description: The storage class to use when storing new objects in Liara - example: STANDARD - type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -5890,12 +6925,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3LyveCloudConfig: + storage.s3LeviiaConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -5920,6 +6959,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -5937,7 +6984,6 @@ definitions: type: string endpoint: description: Endpoint for S3 API. 
- example: s3.us-east-1.lyvecloud.seagate.com type: string envAuth: default: false @@ -5961,20 +7007,19 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -6003,6 +7048,10 @@ definitions: description: Region to connect to. example: "" type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -6014,21 +7063,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6037,12 +7107,1331 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3LiaraConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. 
+ type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for Liara Object Storage API. + example: storage.iran.liara.space + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + storageClass: + description: The storage class to use when storing new objects in Liara + example: STANDARD + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3LinodeConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for Linode Object Storage API. + example: us-southeast-1.linodeobjects.com + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. 
+ type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3LyveCloudConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. 
+ type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + example: s3.us-east-1.lyvecloud.seagate.com + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + example: "" + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. 
+ type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3MagaluConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + example: br-se1.magaluobjects.com + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. 
+ type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + storageClass: + description: The storage class to use when storing new objects in Magalu. + example: STANDARD + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3MinioConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. 
+ type: string + endpoint: + description: Endpoint for S3 API. + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + example: "" + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + serverSideEncryption: + description: The server-side encryption algorithm used when storing this object + in S3. + example: "" + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + sseCustomerAlgorithm: + description: If using SSE-C, the server-side encryption algorithm used when + storing this object in S3. + example: "" + type: string + sseCustomerKey: + description: To use SSE-C you may provide the secret encryption key used to + encrypt/decrypt your data. + example: "" + type: string + sseCustomerKeyBase64: + description: If using SSE-C you must provide the secret encryption key encoded + in base64 format to encrypt/decrypt your data. + example: "" + type: string + sseCustomerKeyMd5: + description: If using SSE-C you may provide the secret encryption key MD5 + checksum (optional). + example: "" + type: string + sseKmsKeyId: + description: If using KMS ID you must provide the ARN of Key. + example: "" + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' 
+ type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3NeteaseConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. 
(no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + example: "" + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean + versions: + default: false + description: Include old versions in directory listings. + type: boolean + type: object + storage.s3OtherConfig: + properties: + accessKeyId: + description: AWS Access Key ID. + type: string + acl: + description: Canned ACL used when creating buckets and storing or copying + objects. + type: string + bucketAcl: + description: Canned ACL used when creating buckets. + example: private + type: string + chunkSize: + default: 5Mi + description: Chunk size to use for uploading. + type: string + copyCutoff: + default: 4.656Gi + description: Cutoff for switching to multipart copy. + type: string + decompress: + default: false + description: If set this will decompress gzip encoded objects. + type: boolean + description: + description: Description of the remote. 
+ type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean + disableChecksum: + default: false + description: Don't store MD5 checksum with object metadata. + type: boolean + disableHttp2: + default: false + description: Disable usage of http2 for S3 backends. + type: boolean + downloadUrl: + description: Custom endpoint for downloads. + type: string + encoding: + default: Slash,InvalidUtf8,Dot + description: The encoding for the backend. + type: string + endpoint: + description: Endpoint for S3 API. + type: string + envAuth: + default: false + description: Get AWS credentials from runtime (environment variables or EC2/ECS + meta data if no env vars). + example: false + type: boolean + forcePathStyle: + default: true + description: If true use path style access if false use virtual hosted style. + type: boolean + listChunk: + default: 1000 + description: Size of listing chunk (response list for each ListObject S3 request). + type: integer + listUrlEncode: + default: unset + description: 'Whether to url encode listings: true/false/unset' + type: string + listVersion: + default: 0 + description: 'Version of ListObjects to use: 1,2 or 0 for auto.' + type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string + maxUploadParts: + default: 10000 + description: Maximum number of parts in a multipart upload. + type: integer + memoryPoolFlushTime: + default: 1m0s + description: How often internal memory buffer pools will be flushed. (no longer + used) + type: string + memoryPoolUseMmap: + default: false + description: Whether to use mmap buffers in internal memory pool. (no longer + used) + type: boolean + mightGzip: + default: unset + description: Set this if the backend might gzip objects. + type: string + noCheckBucket: + default: false + description: If set, don't attempt to check the bucket exists or create it. + type: boolean + noHead: + default: false + description: If set, don't HEAD uploaded objects to check integrity. + type: boolean + noHeadObject: + default: false + description: If set, do not do HEAD before GET when getting objects. + type: boolean + noSystemMetadata: + default: false + description: Suppress setting and reading of system metadata + type: boolean + profile: + description: Profile to use in the shared credentials file. + type: string + region: + description: Region to connect to. + example: "" + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string + secretAccessKey: + description: AWS Secret Access Key (password). + type: string + sessionToken: + description: An AWS session token. + type: string + sharedCredentialsFile: + description: Path to the shared credentials file. + type: string + uploadConcurrency: + default: 4 + description: Concurrency for multipart uploads and copies. + type: integer + uploadCutoff: + default: 200Mi + description: Cutoff for switching to chunked upload. + type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). 
+ type: boolean + useMultipartEtag: + default: unset + description: Whether to use ETag in multipart uploads for verification + type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string + usePresignedRequest: + default: false + description: Whether to use a presigned request or PutObject for single part + uploads + type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string + v2Auth: + default: false + description: If true use v2 authentication. + type: boolean + versionAt: + default: "off" + description: Show file versions as they were at the specified time. + type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3MinioConfig: + storage.s3PetaboxConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6067,6 +8456,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -6083,7 +8480,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for S3 API. + description: Endpoint for Petabox S3 Object Storage. + example: s3.petabox.io type: string envAuth: default: false @@ -6107,20 +8505,19 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -6146,64 +8543,60 @@ definitions: description: Profile to use in the shared credentials file. type: string region: - description: Region to connect to. - example: "" + description: Region where your bucket will be created and your data stored. + example: us-east-1 + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string secretAccessKey: description: AWS Secret Access Key (password). type: string - serverSideEncryption: - description: The server-side encryption algorithm used when storing this object - in S3. - example: "" - type: string sessionToken: description: An AWS session token. type: string sharedCredentialsFile: description: Path to the shared credentials file. type: string - sseCustomerAlgorithm: - description: If using SSE-C, the server-side encryption algorithm used when - storing this object in S3. - example: "" - type: string - sseCustomerKey: - description: To use SSE-C you may provide the secret encryption key used to - encrypt/decrypt your data. 
- example: "" - type: string - sseCustomerKeyBase64: - description: If using SSE-C you must provide the secret encryption key encoded - in base64 format to encrypt/decrypt your data. - example: "" - type: string - sseCustomerKeyMd5: - description: If using SSE-C you may provide the secret encryption key MD5 - checksum (optional). - example: "" - type: string - sseKmsKeyId: - description: If using KMS ID you must provide the ARN of Key. - example: "" - type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6212,12 +8605,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3NeteaseConfig: + storage.s3QiniuConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6242,6 +8639,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -6258,7 +8663,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for S3 API. + description: Endpoint for Qiniu Object Storage. + example: s3-cn-east-1.qiniucs.com type: string envAuth: default: false @@ -6284,6 +8690,7 @@ definitions: type: integer locationConstraint: description: Location constraint - must be set to match the Region. + example: cn-east-1 type: string maxUploadParts: default: 10000 @@ -6291,11 +8698,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. 
(no longer + used) type: boolean mightGzip: default: unset @@ -6322,7 +8731,11 @@ definitions: type: string region: description: Region to connect to. - example: "" + example: cn-east-1 + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string secretAccessKey: description: AWS Secret Access Key (password). @@ -6333,23 +8746,48 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + storageClass: + description: The storage class to use when storing new objects in Qiniu. + example: STANDARD + type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6358,12 +8796,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3OtherConfig: + storage.s3RackCorpConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6388,6 +8830,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -6404,7 +8854,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for S3 API. + description: Endpoint for RackCorp Object Storage. + example: s3.rackcorp.com type: string envAuth: default: false @@ -6429,7 +8880,9 @@ definitions: description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer locationConstraint: - description: Location constraint - must be set to match the Region. + description: Location constraint - the location where your bucket will be + located and your data stored. + example: global type: string maxUploadParts: default: 10000 @@ -6437,11 +8890,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. 
(no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -6467,8 +8922,13 @@ definitions: description: Profile to use in the shared credentials file. type: string region: - description: Region to connect to. - example: "" + description: region - the location where your bucket will be created and your + data stored. + example: global + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string secretAccessKey: description: AWS Secret Access Key (password). @@ -6481,21 +8941,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6504,12 +8985,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3QiniuConfig: + storage.s3RcloneConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6534,6 +9019,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -6550,8 +9043,7 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Qiniu Object Storage. - example: s3-cn-east-1.qiniucs.com + description: Endpoint for S3 API. type: string envAuth: default: false @@ -6577,7 +9069,6 @@ definitions: type: integer locationConstraint: description: Location constraint - must be set to match the Region. - example: cn-east-1 type: string maxUploadParts: default: 10000 @@ -6585,11 +9076,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. 
(no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -6616,7 +9109,11 @@ definitions: type: string region: description: Region to connect to. - example: cn-east-1 + example: "" + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string secretAccessKey: description: AWS Secret Access Key (password). @@ -6627,27 +9124,44 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string - storageClass: - description: The storage class to use when storing new objects in Qiniu. - example: STANDARD - type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6656,12 +9170,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3RackCorpConfig: + storage.s3ScalewayConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6686,6 +9204,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -6702,8 +9228,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for RackCorp Object Storage. - example: s3.rackcorp.com + description: Endpoint for Scaleway Object Storage. + example: s3.nl-ams.scw.cloud type: string envAuth: default: false @@ -6727,22 +9253,19 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - the location where your bucket will be - located and your data stored. 
- example: global - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -6768,9 +9291,12 @@ definitions: description: Profile to use in the shared credentials file. type: string region: - description: region - the location where your bucket will be created and your - data stored. - example: global + description: Region to connect to. + example: nl-ams + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string secretAccessKey: description: AWS Secret Access Key (password). @@ -6781,23 +9307,48 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string + storageClass: + description: The storage class to use when storing new objects in S3. + example: "" + type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6806,12 +9357,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3ScalewayConfig: + storage.s3SeaweedFSConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6836,6 +9391,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -6852,8 +9415,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Scaleway Object Storage. 
- example: s3.nl-ams.scw.cloud + description: Endpoint for S3 API. + example: localhost:8333 type: string envAuth: default: false @@ -6877,17 +9440,22 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -6914,7 +9482,11 @@ definitions: type: string region: description: Region to connect to. - example: nl-ams + example: "" + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string secretAccessKey: description: AWS Secret Access Key (password). @@ -6925,27 +9497,44 @@ definitions: sharedCredentialsFile: description: Path to the shared credentials file. type: string - storageClass: - description: The storage class to use when storing new objects in S3. - example: "" - type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -6954,12 +9543,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3SeaweedFSConfig: + storage.s3StackPathConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -6984,6 +9577,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. 
@@ -7000,8 +9601,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for S3 API. - example: localhost:8333 + description: Endpoint for StackPath Object Storage. + example: s3.us-east-2.stackpathstorage.com type: string envAuth: default: false @@ -7025,20 +9626,19 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer - locationConstraint: - description: Location constraint - must be set to match the Region. - type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -7067,6 +9667,10 @@ definitions: description: Region to connect to. example: "" type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -7078,21 +9682,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -7101,20 +9726,20 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3StackPathConfig: + storage.s3StorjConfig: properties: accessKeyId: description: AWS Access Key ID. type: string - acl: - description: Canned ACL used when creating buckets and storing or copying - objects. - type: string bucketAcl: description: Canned ACL used when creating buckets. example: private @@ -7131,6 +9756,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. 
+ type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -7147,8 +9780,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for StackPath Object Storage. - example: s3.us-east-2.stackpathstorage.com + description: Endpoint for Storj Gateway. + example: gateway.storjshare.io type: string envAuth: default: false @@ -7178,11 +9811,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -7207,9 +9842,9 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string - region: - description: Region to connect to. - example: "" + sdkLogMode: + default: "Off" + description: Set to debug the SDK type: string secretAccessKey: description: AWS Secret Access Key (password). @@ -7222,21 +9857,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -7245,12 +9901,16 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. type: boolean type: object - storage.s3StorjConfig: + storage.s3SynologyConfig: properties: accessKeyId: description: AWS Access Key ID. @@ -7271,6 +9931,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. 
@@ -7287,8 +9955,8 @@ definitions: description: The encoding for the backend. type: string endpoint: - description: Endpoint for Storj Gateway. - example: gateway.storjshare.io + description: Endpoint for Synology C2 Object Storage API. + example: eu-001.s3.synologyc2.net type: string envAuth: default: false @@ -7312,17 +9980,22 @@ definitions: default: 0 description: 'Version of ListObjects to use: 1,2 or 0 for auto.' type: integer + locationConstraint: + description: Location constraint - must be set to match the Region. + type: string maxUploadParts: default: 10000 description: Maximum number of parts in a multipart upload. type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -7347,6 +10020,14 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + region: + description: Region where your data stored. + example: eu-001 + type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -7358,21 +10039,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -7381,6 +10083,10 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. @@ -7412,6 +10118,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. 
@@ -7459,11 +10173,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -7488,6 +10204,10 @@ definitions: profile: description: Profile to use in the shared credentials file. type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). type: string @@ -7504,21 +10224,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -7527,6 +10268,10 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. @@ -7557,6 +10302,14 @@ definitions: default: false description: If set this will decompress gzip encoded objects. type: boolean + description: + description: Description of the remote. + type: string + directoryMarkers: + default: false + description: Upload an empty object with a trailing slash when a new directory + is created + type: boolean disableChecksum: default: false description: Don't store MD5 checksum with object metadata. @@ -7607,11 +10360,13 @@ definitions: type: integer memoryPoolFlushTime: default: 1m0s - description: How often internal memory buffer pools will be flushed. + description: How often internal memory buffer pools will be flushed. (no longer + used) type: string memoryPoolUseMmap: default: false - description: Whether to use mmap buffers in internal memory pool. + description: Whether to use mmap buffers in internal memory pool. (no longer + used) type: boolean mightGzip: default: unset @@ -7640,6 +10395,10 @@ definitions: description: Region to connect to. example: "" type: string + sdkLogMode: + default: "Off" + description: Set to debug the SDK + type: string secretAccessKey: description: AWS Secret Access Key (password). 
type: string @@ -7651,21 +10410,42 @@ definitions: type: string uploadConcurrency: default: 4 - description: Concurrency for multipart uploads. + description: Concurrency for multipart uploads and copies. type: integer uploadCutoff: default: 200Mi description: Cutoff for switching to chunked upload. type: string + useAcceptEncodingGzip: + default: unset + description: 'Whether to send `Accept-Encoding: gzip` header.' + type: string + useAlreadyExists: + default: unset + description: Set if rclone should report BucketAlreadyExists errors on bucket + creation. + type: string + useDualStack: + default: false + description: If true use AWS S3 dual-stack endpoint (IPv6 support). + type: boolean useMultipartEtag: default: unset description: Whether to use ETag in multipart uploads for verification type: string + useMultipartUploads: + default: unset + description: Set if rclone should use multipart uploads. + type: string usePresignedRequest: default: false description: Whether to use a presigned request or PutObject for single part uploads type: boolean + useUnsignedPayload: + default: unset + description: Whether to use an unsigned payload in PutObject + type: string v2Auth: default: false description: If true use v2 authentication. @@ -7674,6 +10454,10 @@ definitions: default: "off" description: Show file versions as they were at the specified time. type: string + versionDeleted: + default: false + description: Show deleted file markers when using versions. + type: boolean versions: default: false description: Include old versions in directory listings. @@ -7692,6 +10476,9 @@ definitions: default: false description: Should rclone create a library if it doesn't exist. type: boolean + description: + description: Description of the remote. + type: string encoding: default: Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8 description: The encoding for the backend. @@ -7731,6 +10518,17 @@ definitions: default: 64 description: The maximum number of outstanding requests for one file type: integer + connections: + default: 0 + description: Maximum number of SFTP simultaneous connections, 0 for unlimited. + type: integer + copyIsHardlink: + default: false + description: Set to enable server side copies using hardlinks. + type: boolean + description: + description: Description of the remote. + type: string disableConcurrentReads: default: false description: If set don't use concurrent reads. @@ -7747,6 +10545,9 @@ definitions: host: description: SSH host to connect to. type: string + hostKeyAlgorithms: + description: Space separated list of host key algorithms, ordered by preference. + type: string idleTimeout: default: 1m0s description: Max time before closing idle connections. @@ -7813,6 +10614,12 @@ definitions: default: false description: Set to skip any symlinks and any other non regular files. type: boolean + socksProxy: + description: Socks 5 proxy host. + type: string + ssh: + description: Path and arguments to external ssh binary. + type: string subsystem: default: sftp description: Specifies the SSH2 subsystem on the remote host. @@ -7833,10 +10640,22 @@ definitions: type: object storage.sharefileConfig: properties: + authUrl: + description: Auth server URL. + type: string chunkSize: default: 64Mi description: Upload chunk size. type: string + clientId: + description: OAuth Client Id. + type: string + clientSecret: + description: OAuth Client Secret. + type: string + description: + description: Description of the remote. 
+ type: string encoding: default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot description: The encoding for the backend. @@ -7848,6 +10667,12 @@ definitions: description: ID of the root folder. example: "" type: string + token: + description: OAuth Access Token as a JSON blob. + type: string + tokenUrl: + description: Token server url. + type: string uploadCutoff: default: 128Mi description: Cutoff for switching to multipart upload. @@ -7862,6 +10687,9 @@ definitions: default: http://127.0.0.1:9980 description: Sia daemon API URL, like http://sia.daemon.host:9980. type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. @@ -7877,6 +10705,9 @@ definitions: default: true description: Whether the server is configured to be case-insensitive. type: boolean + description: + description: Description of the remote. + type: string domain: default: WORKGROUP description: Domain name for NTLM authentication. @@ -7917,12 +10748,18 @@ definitions: accessGrant: description: Access grant. type: string + description: + description: Description of the remote. + type: string type: object storage.storjNewConfig: properties: apiKey: description: API key. type: string + description: + description: Description of the remote. + type: string passphrase: description: Encryption passphrase. type: string @@ -7949,6 +10786,9 @@ definitions: deletedId: description: Sugarsync deleted folder id. type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,Ctl,InvalidUtf8,Dot description: The encoding for the backend. @@ -7995,7 +10835,10 @@ definitions: type: integer chunkSize: default: 5Gi - description: Above this size files will be chunked into a _segments container. + description: Above this size files will be chunked. + type: string + description: + description: Description of the remote. type: string domain: description: User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) @@ -8015,6 +10858,10 @@ definitions: OpenStack form. example: false type: boolean + fetchUntilEmptyPage: + default: false + description: When paginating, always fetch unless we received an empty page. + type: boolean key: description: API key or password (OS_PASSWORD). type: string @@ -8030,6 +10877,11 @@ definitions: default: false description: Disable support for static and dynamic large objects type: boolean + partialPageFetchThreshold: + default: 0 + description: When paginating, fetch if the current page is within this percentage + of the limit. + type: integer region: description: Region name - optional (OS_REGION_NAME). type: string @@ -8051,6 +10903,10 @@ definitions: description: Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID). type: string + useSegmentsContainer: + default: unset + description: Choose destination for large object segments + type: string user: description: User name to log in (OS_USERNAME). type: string @@ -8073,6 +10929,9 @@ definitions: default: epmfs description: Policy to choose upstream on CREATE category. type: string + description: + description: Description of the remote. + type: string minFreeSpace: default: 1Gi description: Minimum viable free space for lfs/eplfs policies. @@ -8090,10 +10949,17 @@ definitions: accessToken: description: Your access token. 
type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot description: The encoding for the backend. type: string + private: + default: false + description: Set to make uploaded files private + type: boolean type: object storage.webdavConfig: properties: @@ -8103,15 +10969,38 @@ definitions: bearerTokenCommand: description: Command to run to get a bearer token. type: string + description: + description: Description of the remote. + type: string encoding: description: The encoding for the backend. type: string headers: description: Set HTTP headers for all transactions. type: string + nextcloudChunkSize: + default: 10Mi + description: Nextcloud upload chunk size. + type: string + owncloudExcludeMounts: + default: false + description: Exclude ownCloud mounted storages + type: boolean + owncloudExcludeShares: + default: false + description: Exclude ownCloud shares + type: boolean + pacerMinSleep: + default: 10ms + description: Minimum time to sleep between API calls. + type: string pass: description: Password. type: string + unixSocket: + description: Path to a unix domain socket to dial to, instead of opening a + TCP connection directly + type: string url: description: URL of http host to connect to. type: string @@ -8120,7 +11009,7 @@ definitions: type: string vendor: description: Name of the WebDAV site/service/software you are using. - example: nextcloud + example: fastmail type: string type: object storage.yandexConfig: @@ -8134,6 +11023,9 @@ definitions: clientSecret: description: OAuth Client Secret. type: string + description: + description: Description of the remote. + type: string encoding: default: Slash,Del,Ctl,InvalidUtf8,Dot description: The encoding for the backend. @@ -8142,6 +11034,11 @@ definitions: default: false description: Delete files permanently rather than putting them into the trash. type: boolean + spoofUa: + default: true + description: Set the user agent to match an official version of the yandex + disk client. May help with upload performance. + type: boolean token: description: OAuth Access Token as a JSON blob. type: string @@ -8160,6 +11057,9 @@ definitions: clientSecret: description: OAuth Client Secret. type: string + description: + description: Description of the remote. + type: string encoding: default: Del,Ctl,InvalidUtf8 description: The encoding for the backend. 
@@ -8186,7 +11086,6 @@ definitions: externalDocs: description: OpenAPI url: https://swagger.io/resources/open-api/ -host: localhost:9090 info: contact: name: Singularity Team @@ -8621,41 +11520,86 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: List all prepared pieces for a preparation + summary: List all prepared pieces for a preparation + tags: + - Piece + post: + consumes: + - application/json + operationId: AddPiece + parameters: + - description: Preparation ID or name + in: path + name: id + required: true + type: string + - description: Piece information + in: body + name: request + required: true + schema: + $ref: '#/definitions/dataprep.AddPieceRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Car' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Add a piece to a preparation tags: - Piece - post: + /preparation/{id}/piece/{piece_cid}: + delete: consumes: - application/json - operationId: AddPiece + description: |- + Deletes a piece (CAR) and its associated records. For data pieces, resets file ranges + to allow re-packing. For DAG pieces, resets directory export flags for re-generation. + operationId: DeletePiece parameters: - description: Preparation ID or name in: path name: id required: true type: string - - description: Piece information + - description: Piece CID + in: path + name: piece_cid + required: true + type: string + - description: Delete options in: body name: request required: true schema: - $ref: '#/definitions/dataprep.AddPieceRequest' + $ref: '#/definitions/dataprep.DeletePieceRequest' produces: - application/json responses: - "200": - description: OK - schema: - $ref: '#/definitions/model.Car' + "204": + description: No Content "400": description: Bad Request schema: $ref: '#/definitions/api.HTTPError' + "404": + description: Not Found + schema: + $ref: '#/definitions/api.HTTPError' "500": description: Internal Server Error schema: $ref: '#/definitions/api.HTTPError' - summary: Add a piece to a preparation + summary: Delete a piece from a preparation tags: - Piece /preparation/{id}/schedules: @@ -9573,36 +12517,6 @@ paths: summary: Rename a storage connection tags: - Storage - /storage/acd: - post: - consumes: - - application/json - operationId: CreateAcdStorage - parameters: - - description: Request body - in: body - name: request - required: true - schema: - $ref: '#/definitions/storage.createAcdStorageRequest' - produces: - - application/json - responses: - "200": - description: OK - schema: - $ref: '#/definitions/model.Storage' - "400": - description: Bad Request - schema: - $ref: '#/definitions/api.HTTPError' - "500": - description: Internal Server Error - schema: - $ref: '#/definitions/api.HTTPError' - summary: Create Acd storage - tags: - - Storage /storage/azureblob: post: consumes: @@ -10449,6 +13363,38 @@ paths: API key for authentication. 
tags: - Storage + /storage/oos/workload_identity_auth: + post: + consumes: + - application/json + operationId: CreateOosWorkload_identity_authStorage + parameters: + - description: Request body + in: body + name: request + required: true + schema: + $ref: '#/definitions/storage.createOosWorkload_identity_authStorageRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Create Oos storage with workload_identity_auth - use workload identity + to grant OCI Container Engine for Kubernetes workloads policy-driven access + to OCI resources using OCI Identity and Access Management (IAM). + tags: + - Storage /storage/opendrive: post: consumes: @@ -10841,6 +13787,36 @@ paths: summary: Create S3 storage with Dreamhost - Dreamhost DreamObjects tags: - Storage + /storage/s3/gcs: + post: + consumes: + - application/json + operationId: CreateS3GCSStorage + parameters: + - description: Request body + in: body + name: request + required: true + schema: + $ref: '#/definitions/storage.createS3GCSStorageRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Create S3 storage with GCS - Google Cloud Storage + tags: + - Storage /storage/s3/huaweiobs: post: consumes: @@ -10961,6 +13937,36 @@ paths: summary: Create S3 storage with IONOS - IONOS Cloud tags: - Storage + /storage/s3/leviia: + post: + consumes: + - application/json + operationId: CreateS3LeviiaStorage + parameters: + - description: Request body + in: body + name: request + required: true + schema: + $ref: '#/definitions/storage.createS3LeviiaStorageRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Create S3 storage with Leviia - Leviia Object Storage + tags: + - Storage /storage/s3/liara: post: consumes: @@ -10991,6 +13997,36 @@ paths: summary: Create S3 storage with Liara - Liara Object Storage tags: - Storage + /storage/s3/linode: + post: + consumes: + - application/json + operationId: CreateS3LinodeStorage + parameters: + - description: Request body + in: body + name: request + required: true + schema: + $ref: '#/definitions/storage.createS3LinodeStorageRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Create S3 storage with Linode - Linode Object Storage + tags: + - Storage /storage/s3/lyvecloud: post: consumes: @@ -11021,6 +14057,36 @@ paths: summary: Create S3 storage with LyveCloud - Seagate Lyve Cloud tags: - Storage + /storage/s3/magalu: + post: + consumes: + - application/json + operationId: CreateS3MagaluStorage + parameters: + - description: Request body + in: body + name: request + required: 
true + schema: + $ref: '#/definitions/storage.createS3MagaluStorageRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Create S3 storage with Magalu - Magalu Object Storage + tags: + - Storage /storage/s3/minio: post: consumes: @@ -11111,6 +14177,36 @@ paths: summary: Create S3 storage with Other - Any other S3 compatible provider tags: - Storage + /storage/s3/petabox: + post: + consumes: + - application/json + operationId: CreateS3PetaboxStorage + parameters: + - description: Request body + in: body + name: request + required: true + schema: + $ref: '#/definitions/storage.createS3PetaboxStorageRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Create S3 storage with Petabox - Petabox Object Storage + tags: + - Storage /storage/s3/qiniu: post: consumes: @@ -11171,6 +14267,36 @@ paths: summary: Create S3 storage with RackCorp - RackCorp Object Storage tags: - Storage + /storage/s3/rclone: + post: + consumes: + - application/json + operationId: CreateS3RcloneStorage + parameters: + - description: Request body + in: body + name: request + required: true + schema: + $ref: '#/definitions/storage.createS3RcloneStorageRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Create S3 storage with Rclone - Rclone S3 Server + tags: + - Storage /storage/s3/scaleway: post: consumes: @@ -11291,6 +14417,36 @@ paths: summary: Create S3 storage with Storj - Storj (S3 Compatible Gateway) tags: - Storage + /storage/s3/synology: + post: + consumes: + - application/json + operationId: CreateS3SynologyStorage + parameters: + - description: Request body + in: body + name: request + required: true + schema: + $ref: '#/definitions/storage.createS3SynologyStorageRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/model.Storage' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.HTTPError' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.HTTPError' + summary: Create S3 storage with Synology - Synology C2 Object Storage + tags: + - Storage /storage/s3/tencentcos: post: consumes: diff --git a/handler/storage/types_gen.go b/handler/storage/types_gen.go index 91408d6d..95e6f642 100644 --- a/handler/storage/types_gen.go +++ b/handler/storage/types_gen.go @@ -5,37 +5,6 @@ package storage import "github.com/data-preservation-programs/singularity/model" -type acdConfig struct { - ClientId string `json:"clientId"` // OAuth Client Id. - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. - Token string `json:"token"` // OAuth Access Token as a JSON blob. - AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. 
- Checkpoint string `json:"checkpoint"` // Checkpoint for internal polling (debug). - UploadWaitPerGb string `json:"uploadWaitPerGb" default:"3m0s"` // Additional time per GiB to wait after a failed complete upload to see if it appears. - TemplinkThreshold string `json:"templinkThreshold" default:"9Gi"` // Files >= this size will be downloaded via their tempLink. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. -} - -type createAcdStorageRequest struct { - Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique - Path string `json:"path"` // Path of the storage - Config acdConfig `json:"config"` // config for the storage - ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client -} - -// @ID CreateAcdStorage -// @Summary Create Acd storage -// @Tags Storage -// @Accept json -// @Produce json -// @Success 200 {object} model.Storage -// @Failure 400 {object} api.HTTPError -// @Failure 500 {object} api.HTTPError -// @Param request body createAcdStorageRequest true "Request body" -// @Router /storage/acd [post] -func createAcdStorage() {} - type azureblobConfig struct { Account string `json:"account"` // Azure Storage Account Name. EnvAuth bool `json:"envAuth" default:"false"` // Read credentials from runtime (environment variables, CLI or MSI). @@ -60,15 +29,18 @@ type azureblobConfig struct { ChunkSize string `json:"chunkSize" default:"4Mi"` // Upload chunk size. UploadConcurrency int `json:"uploadConcurrency" default:"16"` // Concurrency for multipart uploads. ListChunk int `json:"listChunk" default:"5000"` // Size of blob list. - AccessTier string `json:"accessTier"` // Access tier of blob: hot, cool or archive. + AccessTier string `json:"accessTier"` // Access tier of blob: hot, cool, cold or archive. ArchiveTierDelete bool `json:"archiveTierDelete" default:"false"` // Delete archive tier blobs before overwriting. DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8"` // The encoding for the backend. PublicAccess string `json:"publicAccess" example:""` // Public access level of a container: blob or container. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created NoCheckContainer bool `json:"noCheckContainer" default:"false"` // If set, don't attempt to check the container exists or create it. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + DeleteSnapshots string `json:"deleteSnapshots" example:""` // Set to specify how to deal with snapshots on blob deletion. + Description string `json:"description"` // Description of the remote. 
} type createAzureblobStorageRequest struct { @@ -101,12 +73,15 @@ type b2Config struct { UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. CopyCutoff string `json:"copyCutoff" default:"4Gi"` // Cutoff for switching to multipart copy. ChunkSize string `json:"chunkSize" default:"96Mi"` // Upload chunk size. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. DisableChecksum bool `json:"disableChecksum" default:"false"` // Disable checksums for large (> upload cutoff) files. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - DownloadAuthDuration string `json:"downloadAuthDuration" default:"1w"` // Time before the authorization token will expire in s or suffix ms|s|m|h|d. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + DownloadAuthDuration string `json:"downloadAuthDuration" default:"1w"` // Time before the public link authorization token will expire in s or suffix ms|s|m|h|d. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + Lifecycle int `json:"lifecycle" default:"0"` // Set the number of days deleted files should be kept when creating a bucket. Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createB2StorageRequest struct { @@ -142,7 +117,9 @@ type boxConfig struct { CommitRetries int `json:"commitRetries" default:"100"` // Max number of times to try committing a multipart file. ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk 1-1000. OwnedBy string `json:"ownedBy"` // Only show items owned by the login (email address) passed in. + Impersonate string `json:"impersonate"` // Impersonate this user ID when using a service account. Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createBoxStorageRequest struct { @@ -165,49 +142,56 @@ type createBoxStorageRequest struct { func createBoxStorage() {} type driveConfig struct { - ClientId string `json:"clientId"` // Google Application Client Id - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. - Token string `json:"token"` // OAuth Access Token as a JSON blob. - AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. - Scope string `json:"scope" example:"drive"` // Scope that rclone should use when requesting access from drive. - RootFolderId string `json:"rootFolderId"` // ID of the root folder. - ServiceAccountFile string `json:"serviceAccountFile"` // Service Account Credentials JSON file path. - ServiceAccountCredentials string `json:"serviceAccountCredentials"` // Service Account Credentials JSON blob. - TeamDrive string `json:"teamDrive"` // ID of the Shared Drive (Team Drive). 
- AuthOwnerOnly bool `json:"authOwnerOnly" default:"false"` // Only consider files owned by the authenticated user. - UseTrash bool `json:"useTrash" default:"true"` // Send files to the trash instead of deleting permanently. - CopyShortcutContent bool `json:"copyShortcutContent" default:"false"` // Server side copy contents of shortcuts instead of the shortcut. - SkipGdocs bool `json:"skipGdocs" default:"false"` // Skip google documents in all listings. - SkipChecksumGphotos bool `json:"skipChecksumGphotos" default:"false"` // Skip MD5 checksum on Google photos and videos only. - SharedWithMe bool `json:"sharedWithMe" default:"false"` // Only show files that are shared with me. - TrashedOnly bool `json:"trashedOnly" default:"false"` // Only show files that are in the trash. - StarredOnly bool `json:"starredOnly" default:"false"` // Only show files that are starred. - Formats string `json:"formats"` // Deprecated: See export_formats. - ExportFormats string `json:"exportFormats" default:"docx,xlsx,pptx,svg"` // Comma separated list of preferred formats for downloading Google docs. - ImportFormats string `json:"importFormats"` // Comma separated list of preferred formats for uploading Google docs. - AllowImportNameChange bool `json:"allowImportNameChange" default:"false"` // Allow the filetype to change when uploading Google docs. - UseCreatedDate bool `json:"useCreatedDate" default:"false"` // Use file created date instead of modified date. - UseSharedDate bool `json:"useSharedDate" default:"false"` // Use date file was shared instead of modified date. - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk 100-1000, 0 to disable. - Impersonate string `json:"impersonate"` // Impersonate this user when using a service account. - AlternateExport bool `json:"alternateExport" default:"false"` // Deprecated: No longer needed. - UploadCutoff string `json:"uploadCutoff" default:"8Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"8Mi"` // Upload chunk size. - AcknowledgeAbuse bool `json:"acknowledgeAbuse" default:"false"` // Set to allow files which return cannotDownloadAbusiveFile to be downloaded. - KeepRevisionForever bool `json:"keepRevisionForever" default:"false"` // Keep new head revision of each file forever. - SizeAsQuota bool `json:"sizeAsQuota" default:"false"` // Show sizes as storage quota usage, not actual size. - V2DownloadMinSize string `json:"v2DownloadMinSize" default:"off"` // If Object's are greater, use drive v2 API to download. - PacerMinSleep string `json:"pacerMinSleep" default:"100ms"` // Minimum time to sleep between API calls. - PacerBurst int `json:"pacerBurst" default:"100"` // Number of API calls to allow without sleeping. - ServerSideAcrossConfigs bool `json:"serverSideAcrossConfigs" default:"false"` // Allow server-side operations (e.g. copy) to work across different drive configs. - DisableHttp2 bool `json:"disableHttp2" default:"true"` // Disable drive using http2. - StopOnUploadLimit bool `json:"stopOnUploadLimit" default:"false"` // Make upload limit errors be fatal. - StopOnDownloadLimit bool `json:"stopOnDownloadLimit" default:"false"` // Make download limit errors be fatal. - SkipShortcuts bool `json:"skipShortcuts" default:"false"` // If set skip shortcut files. - SkipDanglingShortcuts bool `json:"skipDanglingShortcuts" default:"false"` // If set skip dangling shortcut files. - ResourceKey string `json:"resourceKey"` // Resource key for accessing a link-shared file. 
- Encoding string `json:"encoding" default:"InvalidUtf8"` // The encoding for the backend. + ClientId string `json:"clientId"` // Google Application Client Id + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + Scope string `json:"scope" example:"drive"` // Comma separated list of scopes that rclone should use when requesting access from drive. + RootFolderId string `json:"rootFolderId"` // ID of the root folder. + ServiceAccountFile string `json:"serviceAccountFile"` // Service Account Credentials JSON file path. + ServiceAccountCredentials string `json:"serviceAccountCredentials"` // Service Account Credentials JSON blob. + TeamDrive string `json:"teamDrive"` // ID of the Shared Drive (Team Drive). + AuthOwnerOnly bool `json:"authOwnerOnly" default:"false"` // Only consider files owned by the authenticated user. + UseTrash bool `json:"useTrash" default:"true"` // Send files to the trash instead of deleting permanently. + CopyShortcutContent bool `json:"copyShortcutContent" default:"false"` // Server side copy contents of shortcuts instead of the shortcut. + SkipGdocs bool `json:"skipGdocs" default:"false"` // Skip google documents in all listings. + ShowAllGdocs bool `json:"showAllGdocs" default:"false"` // Show all Google Docs including non-exportable ones in listings. + SkipChecksumGphotos bool `json:"skipChecksumGphotos" default:"false"` // Skip checksums on Google photos and videos only. + SharedWithMe bool `json:"sharedWithMe" default:"false"` // Only show files that are shared with me. + TrashedOnly bool `json:"trashedOnly" default:"false"` // Only show files that are in the trash. + StarredOnly bool `json:"starredOnly" default:"false"` // Only show files that are starred. + Formats string `json:"formats"` // Deprecated: See export_formats. + ExportFormats string `json:"exportFormats" default:"docx,xlsx,pptx,svg"` // Comma separated list of preferred formats for downloading Google docs. + ImportFormats string `json:"importFormats"` // Comma separated list of preferred formats for uploading Google docs. + AllowImportNameChange bool `json:"allowImportNameChange" default:"false"` // Allow the filetype to change when uploading Google docs. + UseCreatedDate bool `json:"useCreatedDate" default:"false"` // Use file created date instead of modified date. + UseSharedDate bool `json:"useSharedDate" default:"false"` // Use date file was shared instead of modified date. + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk 100-1000, 0 to disable. + Impersonate string `json:"impersonate"` // Impersonate this user when using a service account. + AlternateExport bool `json:"alternateExport" default:"false"` // Deprecated: No longer needed. + UploadCutoff string `json:"uploadCutoff" default:"8Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"8Mi"` // Upload chunk size. + AcknowledgeAbuse bool `json:"acknowledgeAbuse" default:"false"` // Set to allow files which return cannotDownloadAbusiveFile to be downloaded. + KeepRevisionForever bool `json:"keepRevisionForever" default:"false"` // Keep new head revision of each file forever. + SizeAsQuota bool `json:"sizeAsQuota" default:"false"` // Show sizes as storage quota usage, not actual size. 
+ V2DownloadMinSize string `json:"v2DownloadMinSize" default:"off"` // If Object's are greater, use drive v2 API to download. + PacerMinSleep string `json:"pacerMinSleep" default:"100ms"` // Minimum time to sleep between API calls. + PacerBurst int `json:"pacerBurst" default:"100"` // Number of API calls to allow without sleeping. + ServerSideAcrossConfigs bool `json:"serverSideAcrossConfigs" default:"false"` // Deprecated: use --server-side-across-configs instead. + DisableHttp2 bool `json:"disableHttp2" default:"true"` // Disable drive using http2. + StopOnUploadLimit bool `json:"stopOnUploadLimit" default:"false"` // Make upload limit errors be fatal. + StopOnDownloadLimit bool `json:"stopOnDownloadLimit" default:"false"` // Make download limit errors be fatal. + SkipShortcuts bool `json:"skipShortcuts" default:"false"` // If set skip shortcut files. + SkipDanglingShortcuts bool `json:"skipDanglingShortcuts" default:"false"` // If set skip dangling shortcut files. + ResourceKey string `json:"resourceKey"` // Resource key for accessing a link-shared file. + FastListBugFix bool `json:"fastListBugFix" default:"true"` // Work around a bug in Google Drive listing. + MetadataOwner string `json:"metadataOwner" default:"read" example:"off"` // Control whether owner should be read or written in metadata. + MetadataPermissions string `json:"metadataPermissions" default:"off" example:"off"` // Control whether permissions should be read or written in metadata. + MetadataLabels string `json:"metadataLabels" default:"off" example:"off"` // Control whether labels should be read or written in metadata. + Encoding string `json:"encoding" default:"InvalidUtf8"` // The encoding for the backend. + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get IAM credentials from runtime (environment variables or instance meta data if no env vars). + Description string `json:"description"` // Description of the remote. } type createDriveStorageRequest struct { @@ -239,11 +223,14 @@ type dropboxConfig struct { Impersonate string `json:"impersonate"` // Impersonate this user when using a business account. SharedFiles bool `json:"sharedFiles" default:"false"` // Instructs rclone to work on individual shared files. SharedFolders bool `json:"sharedFolders" default:"false"` // Instructs rclone to work on shared folders. + PacerMinSleep string `json:"pacerMinSleep" default:"10ms"` // Minimum time to sleep between API calls. + Encoding string `json:"encoding" default:"Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot"` // The encoding for the backend. + RootNamespace string `json:"rootNamespace"` // Specify a different Dropbox namespace ID to use as the root for all paths. BatchMode string `json:"batchMode" default:"sync"` // Upload file batching sync|async|off. BatchSize int `json:"batchSize" default:"0"` // Max number of files in upload batch. BatchTimeout string `json:"batchTimeout" default:"0s"` // Max time to allow an idle upload batch before uploading. BatchCommitTimeout string `json:"batchCommitTimeout" default:"10m0s"` // Max time to wait for a batch to finish committing - Encoding string `json:"encoding" default:"Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createDropboxStorageRequest struct { @@ -270,7 +257,9 @@ type fichierConfig struct { SharedFolder string `json:"sharedFolder"` // If you want to download a shared folder, add this parameter. 
FilePassword string `json:"filePassword"` // If you want to download a shared file that is password protected, add this parameter. FolderPassword string `json:"folderPassword"` // If you want to list the files in a shared folder that is password protected, add this parameter. + Cdn bool `json:"cdn" default:"false"` // Set if you wish to use CDN download links. Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createFichierStorageRequest struct { @@ -300,6 +289,7 @@ type filefabricConfig struct { TokenExpiry string `json:"tokenExpiry"` // Token expiry time. Version string `json:"version"` // Version read from the file fabric. Encoding string `json:"encoding" default:"Slash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createFilefabricStorageRequest struct { @@ -341,7 +331,9 @@ type ftpConfig struct { DisableTls13 bool `json:"disableTls13" default:"false"` // Disable TLS 1.3 (workaround for FTP servers with buggy TLS) ShutTimeout string `json:"shutTimeout" default:"1m0s"` // Maximum time to wait for data connection closing status. AskPassword bool `json:"askPassword" default:"false"` // Allow asking for FTP password when needed. + SocksProxy string `json:"socksProxy"` // Socks 5 proxy host. Encoding string `json:"encoding" default:"Slash,Del,Ctl,RightSpace,Dot" example:"Asterisk,Ctl,Dot,Slash"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createFtpStorageRequest struct { @@ -370,6 +362,7 @@ type gcsConfig struct { AuthUrl string `json:"authUrl"` // Auth server URL. TokenUrl string `json:"tokenUrl"` // Token server url. ProjectNumber string `json:"projectNumber"` // Project number. + UserProject string `json:"userProject"` // User project. ServiceAccountFile string `json:"serviceAccountFile"` // Service Account Credentials JSON file path. ServiceAccountCredentials string `json:"serviceAccountCredentials"` // Service Account Credentials JSON blob. Anonymous bool `json:"anonymous" default:"false"` // Access public buckets and objects without credentials. @@ -378,11 +371,13 @@ type gcsConfig struct { BucketPolicyOnly bool `json:"bucketPolicyOnly" default:"false"` // Access checks should use bucket-level IAM policies. Location string `json:"location" example:""` // Location for the newly created buckets. StorageClass string `json:"storageClass" example:""` // The storage class to use when storing objects in Google Cloud Storage. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. Endpoint string `json:"endpoint"` // Endpoint for the service. Encoding string `json:"encoding" default:"Slash,CrLf,InvalidUtf8,Dot"` // The encoding for the backend. EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars). + Description string `json:"description"` // Description of the remote. 
} type createGcsStorageRequest struct { @@ -405,16 +400,21 @@ type createGcsStorageRequest struct { func createGcsStorage() {} type gphotosConfig struct { - ClientId string `json:"clientId"` // OAuth Client Id. - ClientSecret string `json:"clientSecret"` // OAuth Client Secret. - Token string `json:"token"` // OAuth Access Token as a JSON blob. - AuthUrl string `json:"authUrl"` // Auth server URL. - TokenUrl string `json:"tokenUrl"` // Token server url. - ReadOnly bool `json:"readOnly" default:"false"` // Set to make the Google Photos backend read only. - ReadSize bool `json:"readSize" default:"false"` // Set to read the size of media items. - StartYear int `json:"startYear" default:"2000"` // Year limits the photos to be downloaded to those which are uploaded after the given year. - IncludeArchived bool `json:"includeArchived" default:"false"` // Also view and download archived media. - Encoding string `json:"encoding" default:"Slash,CrLf,InvalidUtf8,Dot"` // The encoding for the backend. + ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + ReadOnly bool `json:"readOnly" default:"false"` // Set to make the Google Photos backend read only. + ReadSize bool `json:"readSize" default:"false"` // Set to read the size of media items. + StartYear int `json:"startYear" default:"2000"` // Year limits the photos to be downloaded to those which are uploaded after the given year. + IncludeArchived bool `json:"includeArchived" default:"false"` // Also view and download archived media. + Encoding string `json:"encoding" default:"Slash,CrLf,InvalidUtf8,Dot"` // The encoding for the backend. + BatchMode string `json:"batchMode" default:"sync"` // Upload file batching sync|async|off. + BatchSize int `json:"batchSize" default:"0"` // Max number of files in upload batch. + BatchTimeout string `json:"batchTimeout" default:"0s"` // Max time to allow an idle upload batch before uploading. + BatchCommitTimeout string `json:"batchCommitTimeout" default:"10m0s"` // Max time to wait for a batch to finish committing + Description string `json:"description"` // Description of the remote. } type createGphotosStorageRequest struct { @@ -437,11 +437,12 @@ type createGphotosStorageRequest struct { func createGphotosStorage() {} type hdfsConfig struct { - Namenode string `json:"namenode"` // Hadoop name node and port. + Namenode string `json:"namenode"` // Hadoop name nodes and ports. Username string `json:"username" example:"root"` // Hadoop user name. ServicePrincipalName string `json:"servicePrincipalName"` // Kerberos service principal name for the namenode. DataTransferProtection string `json:"dataTransferProtection" example:"privacy"` // Kerberos data transfer protection: authentication|integrity|privacy. Encoding string `json:"encoding" default:"Slash,Colon,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createHdfsStorageRequest struct { @@ -478,6 +479,7 @@ type hidriveConfig struct { UploadCutoff string `json:"uploadCutoff" default:"96Mi"` // Cutoff/Threshold for chunked uploads. UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for chunked uploads. Encoding string `json:"encoding" default:"Slash,Dot"` // The encoding for the backend. 
+ Description string `json:"description"` // Description of the remote. } type createHidriveStorageRequest struct { @@ -500,10 +502,12 @@ type createHidriveStorageRequest struct { func createHidriveStorage() {} type httpConfig struct { - Url string `json:"url"` // URL of HTTP host to connect to. - Headers string `json:"headers"` // Set HTTP headers for all transactions. - NoSlash bool `json:"noSlash" default:"false"` // Set this if the site doesn't end directories with /. - NoHead bool `json:"noHead" default:"false"` // Don't use HEAD requests. + Url string `json:"url"` // URL of HTTP host to connect to. + Headers string `json:"headers"` // Set HTTP headers for all transactions. + NoSlash bool `json:"noSlash" default:"false"` // Set this if the site doesn't end directories with /. + NoHead bool `json:"noHead" default:"false"` // Don't use HEAD requests. + NoEscape bool `json:"noEscape" default:"false"` // Do not escape URL metacharacters in path names. + Description string `json:"description"` // Description of the remote. } type createHttpStorageRequest struct { @@ -533,6 +537,7 @@ type internetarchiveConfig struct { DisableChecksum bool `json:"disableChecksum" default:"true"` // Don't ask the server to test against MD5 checksum calculated by rclone. WaitArchive string `json:"waitArchive" default:"0s"` // Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish. Encoding string `json:"encoding" default:"Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createInternetarchiveStorageRequest struct { @@ -555,12 +560,18 @@ type createInternetarchiveStorageRequest struct { func createInternetarchiveStorage() {} type jottacloudConfig struct { + ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. Md5MemoryLimit string `json:"md5MemoryLimit" default:"10Mi"` // Files bigger than this will be cached on disk to calculate the MD5 if required. TrashedOnly bool `json:"trashedOnly" default:"false"` // Only show files that are in the trash. HardDelete bool `json:"hardDelete" default:"false"` // Delete files permanently rather than putting them into the trash. UploadResumeLimit string `json:"uploadResumeLimit" default:"10Mi"` // Files bigger than this can be resumed if the upload fail's. NoVersions bool `json:"noVersions" default:"false"` // Avoid server side versioning by deleting files and recreating files instead of overwriting them. Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createJottacloudStorageRequest struct { @@ -583,11 +594,12 @@ type createJottacloudStorageRequest struct { func createJottacloudStorage() {} type koofrDigistorageConfig struct { - Mountid string `json:"mountid"` // Mount ID of the mount to use. - Setmtime bool `json:"setmtime" default:"true"` // Does the backend support setting modification time. - User string `json:"user"` // Your user name. - Password string `json:"password"` // Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password). 
- Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Mountid string `json:"mountid"` // Mount ID of the mount to use. + Setmtime bool `json:"setmtime" default:"true"` // Does the backend support setting modification time. + User string `json:"user"` // Your user name. + Password string `json:"password"` // Your password for rclone generate one at https://storage.rcs-rds.ro/app/admin/preferences/password. + Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createKoofrDigistorageStorageRequest struct { @@ -610,11 +622,12 @@ type createKoofrDigistorageStorageRequest struct { func createKoofrDigistorageStorage() {} type koofrKoofrConfig struct { - Mountid string `json:"mountid"` // Mount ID of the mount to use. - Setmtime bool `json:"setmtime" default:"true"` // Does the backend support setting modification time. - User string `json:"user"` // Your user name. - Password string `json:"password"` // Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password). - Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Mountid string `json:"mountid"` // Mount ID of the mount to use. + Setmtime bool `json:"setmtime" default:"true"` // Does the backend support setting modification time. + User string `json:"user"` // Your user name. + Password string `json:"password"` // Your password for rclone generate one at https://app.koofr.net/app/admin/preferences/password. + Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createKoofrKoofrStorageRequest struct { @@ -637,12 +650,13 @@ type createKoofrKoofrStorageRequest struct { func createKoofrKoofrStorage() {} type koofrOtherConfig struct { - Endpoint string `json:"endpoint"` // The Koofr API endpoint to use. - Mountid string `json:"mountid"` // Mount ID of the mount to use. - Setmtime bool `json:"setmtime" default:"true"` // Does the backend support setting modification time. - User string `json:"user"` // Your user name. - Password string `json:"password"` // Your password for rclone (generate one at your service's settings page). - Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Endpoint string `json:"endpoint"` // The Koofr API endpoint to use. + Mountid string `json:"mountid"` // Mount ID of the mount to use. + Setmtime bool `json:"setmtime" default:"true"` // Does the backend support setting modification time. + User string `json:"user"` // Your user name. + Password string `json:"password"` // Your password for rclone (generate one at your service's settings page). + Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createKoofrOtherStorageRequest struct { @@ -665,20 +679,23 @@ type createKoofrOtherStorageRequest struct { func createKoofrOtherStorage() {} type localConfig struct { - Nounc bool `json:"nounc" default:"false" example:"true"` // Disable UNC (long path names) conversion on Windows. 
- CopyLinks bool `json:"copyLinks" default:"false"` // Follow symlinks and copy the pointed to item. - Links bool `json:"links" default:"false"` // Translate symlinks to/from regular files with a '.rclonelink' extension. - SkipLinks bool `json:"skipLinks" default:"false"` // Don't warn about skipped symlinks. - ZeroSizeLinks bool `json:"zeroSizeLinks" default:"false"` // Assume the Stat size of links is zero (and read them instead) (deprecated). - UnicodeNormalization bool `json:"unicodeNormalization" default:"false"` // Apply unicode NFC normalization to paths and filenames. - NoCheckUpdated bool `json:"noCheckUpdated" default:"false"` // Don't check to see if the files change during upload. - OneFileSystem bool `json:"oneFileSystem" default:"false"` // Don't cross filesystem boundaries (unix/macOS only). - CaseSensitive bool `json:"caseSensitive" default:"false"` // Force the filesystem to report itself as case sensitive. - CaseInsensitive bool `json:"caseInsensitive" default:"false"` // Force the filesystem to report itself as case insensitive. - NoPreallocate bool `json:"noPreallocate" default:"false"` // Disable preallocation of disk space for transferred files. - NoSparse bool `json:"noSparse" default:"false"` // Disable sparse files for multi-thread downloads. - NoSetModtime bool `json:"noSetModtime" default:"false"` // Disable setting modtime. - Encoding string `json:"encoding" default:"Slash,Dot"` // The encoding for the backend. + Nounc bool `json:"nounc" default:"false" example:"true"` // Disable UNC (long path names) conversion on Windows. + CopyLinks bool `json:"copyLinks" default:"false"` // Follow symlinks and copy the pointed to item. + Links bool `json:"links" default:"false"` // Translate symlinks to/from regular files with a '.rclonelink' extension. + SkipLinks bool `json:"skipLinks" default:"false"` // Don't warn about skipped symlinks. + ZeroSizeLinks bool `json:"zeroSizeLinks" default:"false"` // Assume the Stat size of links is zero (and read them instead) (deprecated). + UnicodeNormalization bool `json:"unicodeNormalization" default:"false"` // Apply unicode NFC normalization to paths and filenames. + NoCheckUpdated bool `json:"noCheckUpdated" default:"false"` // Don't check to see if the files change during upload. + OneFileSystem bool `json:"oneFileSystem" default:"false"` // Don't cross filesystem boundaries (unix/macOS only). + CaseSensitive bool `json:"caseSensitive" default:"false"` // Force the filesystem to report itself as case sensitive. + CaseInsensitive bool `json:"caseInsensitive" default:"false"` // Force the filesystem to report itself as case insensitive. + NoClone bool `json:"noClone" default:"false"` // Disable reflink cloning for server-side copies. + NoPreallocate bool `json:"noPreallocate" default:"false"` // Disable preallocation of disk space for transferred files. + NoSparse bool `json:"noSparse" default:"false"` // Disable sparse files for multi-thread downloads. + NoSetModtime bool `json:"noSetModtime" default:"false"` // Disable setting modtime. + TimeType string `json:"timeType" default:"mtime" example:"mtime"` // Set what kind of time is returned. + Encoding string `json:"encoding" default:"Slash,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createLocalStorageRequest struct { @@ -701,6 +718,11 @@ type createLocalStorageRequest struct { func createLocalStorage() {} type mailruConfig struct { + ClientId string `json:"clientId"` // OAuth Client Id. 
+ ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. User string `json:"user"` // User name (usually email). Pass string `json:"pass"` // Password. SpeedupEnable bool `json:"speedupEnable" default:"true" example:"true"` // Skip full upload if there is another file with same data hash. @@ -711,6 +733,7 @@ type mailruConfig struct { UserAgent string `json:"userAgent"` // HTTP user agent used internally by client. Quirks string `json:"quirks"` // Comma separated list of internal maintenance flags. Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createMailruStorageRequest struct { @@ -733,12 +756,13 @@ type createMailruStorageRequest struct { func createMailruStorage() {} type megaConfig struct { - User string `json:"user"` // User name. - Pass string `json:"pass"` // Password. - Debug bool `json:"debug" default:"false"` // Output more debug from Mega. - HardDelete bool `json:"hardDelete" default:"false"` // Delete files permanently rather than putting them into the trash. - UseHttps bool `json:"useHttps" default:"false"` // Use HTTPS for transfers. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + User string `json:"user"` // User name. + Pass string `json:"pass"` // Password. + Debug bool `json:"debug" default:"false"` // Output more debug from Mega. + HardDelete bool `json:"hardDelete" default:"false"` // Delete files permanently rather than putting them into the trash. + UseHttps bool `json:"useHttps" default:"false"` // Use HTTPS for transfers. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createMegaStorageRequest struct { @@ -761,10 +785,11 @@ type createMegaStorageRequest struct { func createMegaStorage() {} type netstorageConfig struct { - Protocol string `json:"protocol" default:"https" example:"http"` // Select between HTTP or HTTPS protocol. - Host string `json:"host"` // Domain+path of NetStorage host to connect to. - Account string `json:"account"` // Set the NetStorage account name - Secret string `json:"secret"` // Set the NetStorage account secret/G2O key for authentication. + Protocol string `json:"protocol" default:"https" example:"http"` // Select between HTTP or HTTPS protocol. + Host string `json:"host"` // Domain+path of NetStorage host to connect to. + Account string `json:"account"` // Set the NetStorage account name + Secret string `json:"secret"` // Set the NetStorage account secret/G2O key for authentication. + Description string `json:"description"` // Description of the remote. } type createNetstorageStorageRequest struct { @@ -800,14 +825,19 @@ type onedriveConfig struct { AccessScopes string `json:"accessScopes" default:"Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" example:"Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access"` // Set scopes to be requested by rclone. DisableSitePermission bool `json:"disableSitePermission" default:"false"` // Disable the request for Sites.Read.All permission. 
ExposeOnenoteFiles bool `json:"exposeOnenoteFiles" default:"false"` // Set to make OneNote files show up in directory listings. - ServerSideAcrossConfigs bool `json:"serverSideAcrossConfigs" default:"false"` // Allow server-side operations (e.g. copy) to work across different onedrive configs. + ServerSideAcrossConfigs bool `json:"serverSideAcrossConfigs" default:"false"` // Deprecated: use --server-side-across-configs instead. ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk. NoVersions bool `json:"noVersions" default:"false"` // Remove all versions on modifying operations. + HardDelete bool `json:"hardDelete" default:"false"` // Permanently delete files on removal. LinkScope string `json:"linkScope" default:"anonymous" example:"anonymous"` // Set the scope of the links created by the link command. LinkType string `json:"linkType" default:"view" example:"view"` // Set the type of the links created by the link command. LinkPassword string `json:"linkPassword"` // Set the password for links created by the link command. HashType string `json:"hashType" default:"auto" example:"auto"` // Specify the hash in use for the backend. + AvOverride bool `json:"avOverride" default:"false"` // Allows download of files the server thinks has a virus. + Delta bool `json:"delta" default:"false"` // If set rclone will use delta listing to implement recursive listings. + MetadataPermissions string `json:"metadataPermissions" default:"off" example:"off"` // Control whether permissions should be read or written in metadata. Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createOnedriveStorageRequest struct { @@ -837,18 +867,21 @@ type oosEnv_authConfig struct { StorageTier string `json:"storageTier" default:"Standard" example:"Standard"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. UploadConcurrency int `json:"uploadConcurrency" default:"10"` // Concurrency for multipart uploads. CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. CopyTimeout string `json:"copyTimeout" default:"1m0s"` // Timeout for copy. DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. + AttemptResumeUpload bool `json:"attemptResumeUpload" default:"false"` // If true attempt to resume previously started multipart upload for the object. 
NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. SseCustomerKeyFile string `json:"sseCustomerKeyFile" example:""` // To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to SseCustomerKeySha256 string `json:"sseCustomerKeySha256" example:""` // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption - SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using using your own master key in vault, this header specifies the + SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using your own master key in vault, this header specifies the SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. + Description string `json:"description"` // Description of the remote. } type createOosEnv_authStorageRequest struct { @@ -878,18 +911,21 @@ type oosInstance_principal_authConfig struct { StorageTier string `json:"storageTier" default:"Standard" example:"Standard"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. UploadConcurrency int `json:"uploadConcurrency" default:"10"` // Concurrency for multipart uploads. CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. CopyTimeout string `json:"copyTimeout" default:"1m0s"` // Timeout for copy. DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. + AttemptResumeUpload bool `json:"attemptResumeUpload" default:"false"` // If true attempt to resume previously started multipart upload for the object. NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. 
SseCustomerKeyFile string `json:"sseCustomerKeyFile" example:""` // To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to SseCustomerKeySha256 string `json:"sseCustomerKeySha256" example:""` // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption - SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using using your own master key in vault, this header specifies the + SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using your own master key in vault, this header specifies the SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. + Description string `json:"description"` // Description of the remote. } type createOosInstance_principal_authStorageRequest struct { @@ -918,18 +954,21 @@ type oosNo_authConfig struct { StorageTier string `json:"storageTier" default:"Standard" example:"Standard"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. UploadConcurrency int `json:"uploadConcurrency" default:"10"` // Concurrency for multipart uploads. CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. CopyTimeout string `json:"copyTimeout" default:"1m0s"` // Timeout for copy. DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. + AttemptResumeUpload bool `json:"attemptResumeUpload" default:"false"` // If true attempt to resume previously started multipart upload for the object. NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. 
SseCustomerKeyFile string `json:"sseCustomerKeyFile" example:""` // To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to SseCustomerKeySha256 string `json:"sseCustomerKeySha256" example:""` // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption - SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using using your own master key in vault, this header specifies the + SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using your own master key in vault, this header specifies the SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. + Description string `json:"description"` // Description of the remote. } type createOosNo_authStorageRequest struct { @@ -959,18 +998,21 @@ type oosResource_principal_authConfig struct { StorageTier string `json:"storageTier" default:"Standard" example:"Standard"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. UploadConcurrency int `json:"uploadConcurrency" default:"10"` // Concurrency for multipart uploads. CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. CopyTimeout string `json:"copyTimeout" default:"1m0s"` // Timeout for copy. DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. + AttemptResumeUpload bool `json:"attemptResumeUpload" default:"false"` // If true attempt to resume previously started multipart upload for the object. NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. 
SseCustomerKeyFile string `json:"sseCustomerKeyFile" example:""` // To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to SseCustomerKeySha256 string `json:"sseCustomerKeySha256" example:""` // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption - SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using using your own master key in vault, this header specifies the + SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using your own master key in vault, this header specifies the SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. + Description string `json:"description"` // Description of the remote. } type createOosResource_principal_authStorageRequest struct { @@ -1002,18 +1044,21 @@ type oosUser_principal_authConfig struct { StorageTier string `json:"storageTier" default:"Standard" example:"Standard"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. UploadConcurrency int `json:"uploadConcurrency" default:"10"` // Concurrency for multipart uploads. CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. CopyTimeout string `json:"copyTimeout" default:"1m0s"` // Timeout for copy. DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. + AttemptResumeUpload bool `json:"attemptResumeUpload" default:"false"` // If true attempt to resume previously started multipart upload for the object. NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. 
SseCustomerKeyFile string `json:"sseCustomerKeyFile" example:""` // To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to SseCustomerKeySha256 string `json:"sseCustomerKeySha256" example:""` // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption - SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using using your own master key in vault, this header specifies the + SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using your own master key in vault, this header specifies the SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. + Description string `json:"description"` // Description of the remote. } type createOosUser_principal_authStorageRequest struct { @@ -1035,11 +1080,56 @@ type createOosUser_principal_authStorageRequest struct { // @Router /storage/oos/user_principal_auth [post] func createOosUser_principal_authStorage() {} +type oosWorkload_identity_authConfig struct { + Namespace string `json:"namespace"` // Object storage namespace + Compartment string `json:"compartment"` // Object storage compartment OCID + Region string `json:"region"` // Object storage Region + Endpoint string `json:"endpoint"` // Endpoint for Object storage API. + StorageTier string `json:"storageTier" default:"Standard" example:"Standard"` // The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + UploadConcurrency int `json:"uploadConcurrency" default:"10"` // Concurrency for multipart uploads. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + CopyTimeout string `json:"copyTimeout" default:"1m0s"` // Timeout for copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. + AttemptResumeUpload bool `json:"attemptResumeUpload" default:"false"` // If true attempt to resume previously started multipart upload for the object. + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. 
+ SseCustomerKeyFile string `json:"sseCustomerKeyFile" example:""` // To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated + SseCustomerKey string `json:"sseCustomerKey" example:""` // To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to + SseCustomerKeySha256 string `json:"sseCustomerKeySha256" example:""` // If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption + SseKmsKeyId string `json:"sseKmsKeyId" example:""` // if using your own master key in vault, this header specifies the + SseCustomerAlgorithm string `json:"sseCustomerAlgorithm" example:""` // If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. + Description string `json:"description"` // Description of the remote. +} + +type createOosWorkload_identity_authStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config oosWorkload_identity_authConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateOosWorkload_identity_authStorage +// @Summary Create Oos storage with workload_identity_auth - use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM). +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createOosWorkload_identity_authStorageRequest true "Request body" +// @Router /storage/oos/workload_identity_auth [post] +func createOosWorkload_identity_authStorage() {} + type opendriveConfig struct { - Username string `json:"username"` // Username. - Password string `json:"password"` // Password. - Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot"` // The encoding for the backend. - ChunkSize string `json:"chunkSize" default:"10Mi"` // Files will be uploaded in chunks this size. + Username string `json:"username"` // Username. + Password string `json:"password"` // Password. + Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot"` // The encoding for the backend. + ChunkSize string `json:"chunkSize" default:"10Mi"` // Files will be uploaded in chunks this size. + Description string `json:"description"` // Description of the remote. } type createOpendriveStorageRequest struct { @@ -1072,6 +1162,7 @@ type pcloudConfig struct { Hostname string `json:"hostname" default:"api.pcloud.com" example:"api.pcloud.com"` // Hostname to connect to. Username string `json:"username"` // Your pcloud username. Password string `json:"password"` // Your pcloud password. + Description string `json:"description"` // Description of the remote. } type createPcloudStorageRequest struct { @@ -1094,8 +1185,14 @@ type createPcloudStorageRequest struct { func createPcloudStorage() {} type premiumizemeConfig struct { - ApiKey string `json:"apiKey"` // API Key. - Encoding string `json:"encoding" default:"Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. 
+ ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + ApiKey string `json:"apiKey"` // API Key. + Encoding string `json:"encoding" default:"Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createPremiumizemeStorageRequest struct { @@ -1118,7 +1215,13 @@ type createPremiumizemeStorageRequest struct { func createPremiumizemeStorage() {} type putioConfig struct { - Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + ClientId string `json:"clientId"` // OAuth Client Id. + ClientSecret string `json:"clientSecret"` // OAuth Client Secret. + Token string `json:"token"` // OAuth Access Token as a JSON blob. + AuthUrl string `json:"authUrl"` // Auth server URL. + TokenUrl string `json:"tokenUrl"` // Token server url. + Encoding string `json:"encoding" default:"Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createPutioStorageRequest struct { @@ -1151,6 +1254,7 @@ type qingstorConfig struct { ChunkSize string `json:"chunkSize" default:"4Mi"` // Chunk size to use for uploading. UploadConcurrency int `json:"uploadConcurrency" default:"1"` // Concurrency for multipart uploads. Encoding string `json:"encoding" default:"Slash,Ctl,InvalidUtf8"` // The encoding for the backend. + Description string `json:"description"` // Description of the remote. } type createQingstorStorageRequest struct { @@ -1197,9 +1301,10 @@ type s3AWSConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). UseAccelerateEndpoint bool `json:"useAccelerateEndpoint" default:"false"` // If true use the AWS S3 accelerated endpoint. LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). @@ -1209,18 +1314,26 @@ type s3AWSConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. 
- MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata - StsEndpoint string `json:"stsEndpoint"` // Endpoint for STS. + StsEndpoint string `json:"stsEndpoint"` // Endpoint for STS (deprecated). + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3AWSStorageRequest struct { @@ -1258,9 +1371,10 @@ type s3AlibabaConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). 
ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -1268,17 +1382,25 @@ type s3AlibabaConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3AlibabaStorageRequest struct { @@ -1301,43 +1423,52 @@ type createS3AlibabaStorageRequest struct { func createS3AlibabaStorage() {} type s3ArvanCloudConfig struct { - EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). - AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. 
- SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). - Endpoint string `json:"endpoint" example:"s3.ir-thr-at1.arvanstorage.com"` // Endpoint for Arvan Cloud Object Storage (AOS) API. - LocationConstraint string `json:"locationConstraint" example:"ir-thr-at1"` // Location constraint - must match endpoint. - Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. - BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. - StorageClass string `json:"storageClass" example:"STANDARD"` // The storage class to use when storing new objects in ArvanCloud. - UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. - ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. - MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. - CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. - DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. - SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. - Profile string `json:"profile"` // Profile to use in the shared credentials file. - SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. - ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. - V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. - ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). - ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. - ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset - NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. - NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. - NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. - Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. - DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. - DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. - UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification - UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads - Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. - VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. 
-	Decompress            bool   `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
-	MightGzip             string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
-	NoSystemMetadata      bool   `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+	EnvAuth               bool   `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
+	AccessKeyId           string `json:"accessKeyId"` // AWS Access Key ID.
+	SecretAccessKey       string `json:"secretAccessKey"` // AWS Secret Access Key (password).
+	Endpoint              string `json:"endpoint" example:"s3.ir-thr-at1.arvanstorage.ir"` // Endpoint for Arvan Cloud Object Storage (AOS) API.
+	LocationConstraint    string `json:"locationConstraint" example:"ir-thr-at1"` // Location constraint - must match endpoint.
+	Acl                   string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects.
+	BucketAcl             string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets.
+	StorageClass          string `json:"storageClass" example:"STANDARD"` // The storage class to use when storing new objects in ArvanCloud.
+	UploadCutoff          string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload.
+	ChunkSize             string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading.
+	MaxUploadParts        int    `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload.
+	CopyCutoff            string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy.
+	DisableChecksum       bool   `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata.
+	SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
+	Profile               string `json:"profile"` // Profile to use in the shared credentials file.
+	SessionToken          string `json:"sessionToken"` // An AWS session token.
+	UploadConcurrency     int    `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
+	ForcePathStyle        bool   `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
+	V2Auth                bool   `json:"v2Auth" default:"false"` // If true use v2 authentication.
+	UseDualStack          bool   `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
+	ListChunk             int    `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
+	ListVersion           int    `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
+	ListUrlEncode         string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
+	NoCheckBucket         bool   `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it.
+	NoHead                bool   `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
+	NoHeadObject          bool   `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
+	Encoding              string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
+	MemoryPoolFlushTime   string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+	MemoryPoolUseMmap     bool   `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
+	DisableHttp2          bool   `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
+	DownloadUrl           string `json:"downloadUrl"` // Custom endpoint for downloads.
+	DirectoryMarkers      bool   `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
+	UseMultipartEtag      string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+	UseUnsignedPayload    string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
+	UsePresignedRequest   bool   `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
+	Versions              bool   `json:"versions" default:"false"` // Include old versions in directory listings.
+	VersionAt             string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+	VersionDeleted        bool   `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
+	Decompress            bool   `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
+	MightGzip             string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+	UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
+	NoSystemMetadata      bool   `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+	UseAlreadyExists      string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+	UseMultipartUploads   string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+	SdkLogMode            string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+	Description           string `json:"description"` // Description of the remote.
 }
 
 type createS3ArvanCloudStorageRequest struct {
@@ -1382,9 +1513,10 @@ type s3CephConfig struct {
 	SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
 	Profile               string `json:"profile"` // Profile to use in the shared credentials file.
 	SessionToken          string `json:"sessionToken"` // An AWS session token.
-	UploadConcurrency     int    `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads.
+	UploadConcurrency     int    `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
 	ForcePathStyle        bool   `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
 	V2Auth                bool   `json:"v2Auth" default:"false"` // If true use v2 authentication.
+	UseDualStack          bool   `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
 	ListChunk             int    `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
 	ListVersion           int    `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
 	ListUrlEncode         string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
@@ -1392,17 +1524,25 @@ type s3CephConfig struct {
 	NoHead                bool   `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
 	NoHeadObject          bool   `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
 	Encoding              string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
- MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3CephStorageRequest struct { @@ -1446,9 +1586,10 @@ type s3ChinaMobileConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. 
ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -1456,17 +1597,25 @@ type s3ChinaMobileConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3ChinaMobileStorageRequest struct { @@ -1503,9 +1652,10 @@ type s3CloudflareConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -1513,17 +1663,25 @@ type s3CloudflareConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. 
} type createS3CloudflareStorageRequest struct { @@ -1562,9 +1720,10 @@ type s3DigitalOceanConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -1572,17 +1731,25 @@ type s3DigitalOceanConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. 
NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3DigitalOceanStorageRequest struct { @@ -1621,9 +1788,10 @@ type s3DreamhostConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -1631,17 +1799,25 @@ type s3DreamhostConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. 
VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3DreamhostStorageRequest struct { @@ -1663,6 +1839,74 @@ type createS3DreamhostStorageRequest struct { // @Router /storage/s3/dreamhost [post] func createS3DreamhostStorage() {} +type s3GCSConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:""` // Region to connect to. + Endpoint string `json:"endpoint" example:"https://storage.googleapis.com"` // Endpoint for Google Cloud Storage. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. 
+ ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. 
+} + +type createS3GCSStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3GCSConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3GCSStorage +// @Summary Create S3 storage with GCS - Google Cloud Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3GCSStorageRequest true "Request body" +// @Router /storage/s3/gcs [post] +func createS3GCSStorage() {} + type s3HuaweiOBSConfig struct { EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. @@ -1679,9 +1923,10 @@ type s3HuaweiOBSConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -1689,17 +1934,25 @@ type s3HuaweiOBSConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3HuaweiOBSStorageRequest struct { @@ -1738,9 +1991,10 @@ type s3IBMCOSConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -1748,17 +2002,25 @@ type s3IBMCOSConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. 
- MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3IBMCOSStorageRequest struct { @@ -1794,9 +2056,10 @@ type s3IDriveConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. 
ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -1804,17 +2067,25 @@ type s3IDriveConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3IDriveStorageRequest struct { @@ -1852,9 +2123,10 @@ type s3IONOSConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. 
 ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
 V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication.
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
 ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
 ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
 ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
@@ -1862,17 +2134,25 @@ type s3IONOSConfig struct {
 NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
 NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
 Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
- MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed.
- MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool.
+ MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+ MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
 DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
 DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
 UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+ UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
 UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
 Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
 VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+ VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
 Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
 MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+ UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
 NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+ UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+ UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+ Description string `json:"description"` // Description of the remote.
 }
 
 type createS3IONOSStorageRequest struct {
@@ -1894,6 +2174,73 @@ type createS3IONOSStorageRequest struct {
 // @Router /storage/s3/ionos [post]
 func createS3IONOSStorage() {}
 
+type s3LeviiaConfig struct {
+ EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
+ AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID.
+ SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password).
+ Region string `json:"region" example:""` // Region to connect to.
+ Endpoint string `json:"endpoint"` // Endpoint for S3 API.
+ Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects.
+ BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets.
+ UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload.
+ ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading.
+ MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload.
+ CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy.
+ DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata.
+ SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
+ Profile string `json:"profile"` // Profile to use in the shared credentials file.
+ SessionToken string `json:"sessionToken"` // An AWS session token.
+ UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
+ ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
+ V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication.
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
+ ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
+ ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
+ ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
+ NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it.
+ NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
+ NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
+ Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
+ MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+ MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
+ DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
+ DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
+ UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+ UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
+ UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
+ Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
+ VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+ VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
+ Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
+ MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+ UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
+ NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+ UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+ UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+ Description string `json:"description"` // Description of the remote.
+}
+
+type createS3LeviiaStorageRequest struct {
+ Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique
+ Path string `json:"path"` // Path of the storage
+ Config s3LeviiaConfig `json:"config"` // config for the storage
+ ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client
+}
+
+// @ID CreateS3LeviiaStorage
+// @Summary Create S3 storage with Leviia - Leviia Object Storage
+// @Tags Storage
+// @Accept json
+// @Produce json
+// @Success 200 {object} model.Storage
+// @Failure 400 {object} api.HTTPError
+// @Failure 500 {object} api.HTTPError
+// @Param request body createS3LeviiaStorageRequest true "Request body"
+// @Router /storage/s3/leviia [post]
+func createS3LeviiaStorage() {}
+
 type s3LiaraConfig struct {
 EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
 AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID.
@@ -1910,9 +2257,10 @@ type s3LiaraConfig struct {
 SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
 Profile string `json:"profile"` // Profile to use in the shared credentials file.
 SessionToken string `json:"sessionToken"` // An AWS session token.
- UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads.
+ UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
 ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
 V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication.
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -1920,17 +2268,25 @@ type s3LiaraConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. 
} type createS3LiaraStorageRequest struct { @@ -1952,6 +2308,72 @@ type createS3LiaraStorageRequest struct { // @Router /storage/s3/liara [post] func createS3LiaraStorage() {} +type s3LinodeConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"us-southeast-1.linodeobjects.com"` // Endpoint for Linode Object Storage API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3LinodeStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3LinodeConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3LinodeStorage +// @Summary Create S3 storage with Linode - Linode Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3LinodeStorageRequest true "Request body" +// @Router /storage/s3/linode [post] +func createS3LinodeStorage() {} + type s3LyveCloudConfig struct { EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. @@ -1969,9 +2391,10 @@ type s3LyveCloudConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -1979,17 +2402,25 @@ type s3LyveCloudConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. 
} type createS3LyveCloudStorageRequest struct { @@ -2011,6 +2442,73 @@ type createS3LyveCloudStorageRequest struct { // @Router /storage/s3/lyvecloud [post] func createS3LyveCloudStorage() {} +type s3MagaluConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Endpoint string `json:"endpoint" example:"br-se1.magaluobjects.com"` // Endpoint for S3 API. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + StorageClass string `json:"storageClass" example:"STANDARD"` // The storage class to use when storing new objects in Magalu. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. + NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. + Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) + DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. + DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. 
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created + UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject + UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads + Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. + VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. + Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. + MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. + NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. +} + +type createS3MagaluStorageRequest struct { + Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique + Path string `json:"path"` // Path of the storage + Config s3MagaluConfig `json:"config"` // config for the storage + ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client +} + +// @ID CreateS3MagaluStorage +// @Summary Create S3 storage with Magalu - Magalu Object Storage +// @Tags Storage +// @Accept json +// @Produce json +// @Success 200 {object} model.Storage +// @Failure 400 {object} api.HTTPError +// @Failure 500 {object} api.HTTPError +// @Param request body createS3MagaluStorageRequest true "Request body" +// @Router /storage/s3/magalu [post] +func createS3MagaluStorage() {} + type s3MinioConfig struct { EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. @@ -2034,9 +2532,10 @@ type s3MinioConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. 
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -2044,17 +2543,25 @@ type s3MinioConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3MinioStorageRequest struct { @@ -2093,9 +2600,10 @@ type s3NeteaseConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. 
SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -2103,17 +2611,25 @@ type s3NeteaseConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. 
+ UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3NeteaseStorageRequest struct { @@ -2152,9 +2668,10 @@ type s3OtherConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -2162,17 +2679,25 @@ type s3OtherConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. 
MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3OtherStorageRequest struct { @@ -2194,6 +2719,73 @@ type createS3OtherStorageRequest struct { // @Router /storage/s3/other [post] func createS3OtherStorage() {} +type s3PetaboxConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:"us-east-1"` // Region where your bucket will be created and your data stored. + Endpoint string `json:"endpoint" example:"s3.petabox.io"` // Endpoint for Petabox S3 Object Storage. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. + ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset + NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it. + NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. 
+ NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
+ Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
+ MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+ MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
+ DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
+ DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
+ UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+ UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
+ UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
+ Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
+ VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+ VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
+ Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
+ MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+ UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
+ NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+ UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+ UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+ Description string `json:"description"` // Description of the remote.
+}
+
+type createS3PetaboxStorageRequest struct {
+ Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique
+ Path string `json:"path"` // Path of the storage
+ Config s3PetaboxConfig `json:"config"` // config for the storage
+ ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client
+}
+
+// @ID CreateS3PetaboxStorage
+// @Summary Create S3 storage with Petabox - Petabox Object Storage
+// @Tags Storage
+// @Accept json
+// @Produce json
+// @Success 200 {object} model.Storage
+// @Failure 400 {object} api.HTTPError
+// @Failure 500 {object} api.HTTPError
+// @Param request body createS3PetaboxStorageRequest true "Request body"
+// @Router /storage/s3/petabox [post]
+func createS3PetaboxStorage() {}
+
 type s3QiniuConfig struct {
 EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
 AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID.
@@ -2212,9 +2804,10 @@ type s3QiniuConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -2222,17 +2815,25 @@ type s3QiniuConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. 
NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3QiniuStorageRequest struct { @@ -2271,9 +2872,10 @@ type s3RackCorpConfig struct { SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. Profile string `json:"profile"` // Profile to use in the shared credentials file. SessionToken string `json:"sessionToken"` // An AWS session token. - UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset @@ -2281,17 +2883,25 @@ type s3RackCorpConfig struct { NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity. NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects. Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend. - MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. - MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. + MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used) + MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used) DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends. DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads. + DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification + UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads Versions bool `json:"versions" default:"false"` // Include old versions in directory listings. 
VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time. + VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions. Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects. MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects. + UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header. NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata + UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation. + UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads. + SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK + Description string `json:"description"` // Description of the remote. } type createS3RackCorpStorageRequest struct { @@ -2313,6 +2923,74 @@ type createS3RackCorpStorageRequest struct { // @Router /storage/s3/rackcorp [post] func createS3RackCorpStorage() {} +type s3RcloneConfig struct { + EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID. + SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password). + Region string `json:"region" example:""` // Region to connect to. + Endpoint string `json:"endpoint"` // Endpoint for S3 API. + LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region. + Acl string `json:"acl"` // Canned ACL used when creating buckets and storing or copying objects. + BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets. + UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload. + ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading. + MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload. + CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy. + DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata. + SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file. + Profile string `json:"profile"` // Profile to use in the shared credentials file. + SessionToken string `json:"sessionToken"` // An AWS session token. + UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies. + ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style. + V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication. + UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support). + ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request). + ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto. 
+ ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
+ NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it.
+ NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
+ NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
+ Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
+ MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+ MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
+ DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
+ DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+ DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
+ UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+ UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
+ UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
+ Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
+ VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+ VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
+ Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
+ MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+ UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
+ NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+ UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+ UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+ SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+ Description string `json:"description"` // Description of the remote.
+}
+
+type createS3RcloneStorageRequest struct {
+ Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique
+ Path string `json:"path"` // Path of the storage
+ Config s3RcloneConfig `json:"config"` // config for the storage
+ ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client
+}
+
+// @ID CreateS3RcloneStorage
+// @Summary Create S3 storage with Rclone - Rclone S3 Server
+// @Tags Storage
+// @Accept json
+// @Produce json
+// @Success 200 {object} model.Storage
+// @Failure 400 {object} api.HTTPError
+// @Failure 500 {object} api.HTTPError
+// @Param request body createS3RcloneStorageRequest true "Request body"
+// @Router /storage/s3/rclone [post]
+func createS3RcloneStorage() {}
+
 type s3ScalewayConfig struct {
 EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
 AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID.
@@ -2330,9 +3008,10 @@ type s3ScalewayConfig struct {
 SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
 Profile string `json:"profile"` // Profile to use in the shared credentials file.
 SessionToken string `json:"sessionToken"` // An AWS session token.
- UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads.
+ UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
 ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
 V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication.
+ UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
 ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
 ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
 ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
@@ -2340,17 +3019,25 @@ type s3ScalewayConfig struct {
 NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
 NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
 Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
- MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed.
- MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool.
+ MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+ MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
 DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
 DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+    DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
     UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+    UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
     UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
     Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
     VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+    VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
     Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
     MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+    UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
     NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+    UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+    UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+    SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+    Description string `json:"description"` // Description of the remote.
 }

 type createS3ScalewayStorageRequest struct {
@@ -2389,9 +3076,10 @@ type s3SeaweedFSConfig struct {
     SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
     Profile string `json:"profile"` // Profile to use in the shared credentials file.
     SessionToken string `json:"sessionToken"` // An AWS session token.
-    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads.
+    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
     ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
     V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication.
+    UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
     ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
     ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
     ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
@@ -2399,17 +3087,25 @@ type s3SeaweedFSConfig struct {
     NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
     NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
     Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
-    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed.
-    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool.
+    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
     DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
     DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+    DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
     UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+    UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
     UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
     Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
     VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+    VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
     Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
     MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+    UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
     NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+    UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+    UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+    SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+    Description string `json:"description"` // Description of the remote.
 }

 type createS3SeaweedFSStorageRequest struct {
@@ -2447,9 +3143,10 @@ type s3StackPathConfig struct {
     SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
     Profile string `json:"profile"` // Profile to use in the shared credentials file.
     SessionToken string `json:"sessionToken"` // An AWS session token.
-    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads.
+    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
     ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
     V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication.
+    UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
     ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
     ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
     ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
@@ -2457,17 +3154,25 @@ type s3StackPathConfig struct {
     NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
     NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
     Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
-    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed.
-    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool.
+    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
     DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
     DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+    DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
     UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+    UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
     UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
     Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
     VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+    VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
     Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
     MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+    UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
     NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+    UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+    UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+    SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+    Description string `json:"description"` // Description of the remote.
 }

 type createS3StackPathStorageRequest struct {
@@ -2503,9 +3208,10 @@ type s3StorjConfig struct {
     SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
     Profile string `json:"profile"` // Profile to use in the shared credentials file.
     SessionToken string `json:"sessionToken"` // An AWS session token.
-    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads.
+    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
     ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
     V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication.
+    UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
     ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
     ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
     ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
@@ -2513,17 +3219,25 @@ type s3StorjConfig struct {
     NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
     NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
     Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
-    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed.
-    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool.
+    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
     DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
     DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+    DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
     UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+    UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
     UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
     Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
     VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+    VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
     Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
     MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+    UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
     NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+    UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+    UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+    SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+    Description string `json:"description"` // Description of the remote.
 }

 type createS3StorjStorageRequest struct {
@@ -2545,6 +3259,73 @@ type createS3StorjStorageRequest struct {
 // @Router /storage/s3/storj [post]
 func createS3StorjStorage() {}

+type s3SynologyConfig struct {
+    EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
+    AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID.
+    SecretAccessKey string `json:"secretAccessKey"` // AWS Secret Access Key (password).
+    Region string `json:"region" example:"eu-001"` // Region where your data stored.
+    Endpoint string `json:"endpoint" example:"eu-001.s3.synologyc2.net"` // Endpoint for Synology C2 Object Storage API.
+    LocationConstraint string `json:"locationConstraint"` // Location constraint - must be set to match the Region.
+    BucketAcl string `json:"bucketAcl" example:"private"` // Canned ACL used when creating buckets.
+    UploadCutoff string `json:"uploadCutoff" default:"200Mi"` // Cutoff for switching to chunked upload.
+    ChunkSize string `json:"chunkSize" default:"5Mi"` // Chunk size to use for uploading.
+    MaxUploadParts int `json:"maxUploadParts" default:"10000"` // Maximum number of parts in a multipart upload.
+    CopyCutoff string `json:"copyCutoff" default:"4.656Gi"` // Cutoff for switching to multipart copy.
+    DisableChecksum bool `json:"disableChecksum" default:"false"` // Don't store MD5 checksum with object metadata.
+    SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
+    Profile string `json:"profile"` // Profile to use in the shared credentials file.
+    SessionToken string `json:"sessionToken"` // An AWS session token.
+    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
+    ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
+    V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication.
+    UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
+    ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
+    ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
+    ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
+    NoCheckBucket bool `json:"noCheckBucket" default:"false"` // If set, don't attempt to check the bucket exists or create it.
+    NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
+    NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
+    Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
+    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
+    DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
+    DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+    DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
+    UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+    UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
+    UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
+    Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
+    VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+    VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
+    Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
+    MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+    UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
+    NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+    UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+    UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+    SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+    Description string `json:"description"` // Description of the remote.
+}
+
+type createS3SynologyStorageRequest struct {
+    Name string `json:"name" example:"my-storage"` // Name of the storage, must be unique
+    Path string `json:"path"` // Path of the storage
+    Config s3SynologyConfig `json:"config"` // config for the storage
+    ClientConfig model.ClientConfig `json:"clientConfig"` // config for underlying HTTP client
+}
+
+// @ID CreateS3SynologyStorage
+// @Summary Create S3 storage with Synology - Synology C2 Object Storage
+// @Tags Storage
+// @Accept json
+// @Produce json
+// @Success 200 {object} model.Storage
+// @Failure 400 {object} api.HTTPError
+// @Failure 500 {object} api.HTTPError
+// @Param request body createS3SynologyStorageRequest true "Request body"
+// @Router /storage/s3/synology [post]
+func createS3SynologyStorage() {}
+
 type s3TencentCOSConfig struct {
     EnvAuth bool `json:"envAuth" default:"false" example:"false"` // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
     AccessKeyId string `json:"accessKeyId"` // AWS Access Key ID.
@@ -2561,9 +3342,10 @@ type s3TencentCOSConfig struct {
     SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
     Profile string `json:"profile"` // Profile to use in the shared credentials file.
     SessionToken string `json:"sessionToken"` // An AWS session token.
-    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads.
+    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
     ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
     V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication.
+    UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
     ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
     ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
     ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
@@ -2571,17 +3353,25 @@ type s3TencentCOSConfig struct {
     NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
     NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
     Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
-    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed.
-    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool.
+    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
     DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
     DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+    DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
     UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+    UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
     UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
     Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
     VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+    VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
     Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
     MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+    UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
     NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+    UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+    UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+    SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+    Description string `json:"description"` // Description of the remote.
 }

 type createS3TencentCOSStorageRequest struct {
@@ -2620,9 +3410,10 @@ type s3WasabiConfig struct {
     SharedCredentialsFile string `json:"sharedCredentialsFile"` // Path to the shared credentials file.
     Profile string `json:"profile"` // Profile to use in the shared credentials file.
     SessionToken string `json:"sessionToken"` // An AWS session token.
-    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads.
+    UploadConcurrency int `json:"uploadConcurrency" default:"4"` // Concurrency for multipart uploads and copies.
     ForcePathStyle bool `json:"forcePathStyle" default:"true"` // If true use path style access if false use virtual hosted style.
     V2Auth bool `json:"v2Auth" default:"false"` // If true use v2 authentication.
+    UseDualStack bool `json:"useDualStack" default:"false"` // If true use AWS S3 dual-stack endpoint (IPv6 support).
     ListChunk int `json:"listChunk" default:"1000"` // Size of listing chunk (response list for each ListObject S3 request).
     ListVersion int `json:"listVersion" default:"0"` // Version of ListObjects to use: 1,2 or 0 for auto.
     ListUrlEncode string `json:"listUrlEncode" default:"unset"` // Whether to url encode listings: true/false/unset
@@ -2630,17 +3421,25 @@ type s3WasabiConfig struct {
     NoHead bool `json:"noHead" default:"false"` // If set, don't HEAD uploaded objects to check integrity.
     NoHeadObject bool `json:"noHeadObject" default:"false"` // If set, do not do HEAD before GET when getting objects.
     Encoding string `json:"encoding" default:"Slash,InvalidUtf8,Dot"` // The encoding for the backend.
-    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed.
-    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool.
+    MemoryPoolFlushTime string `json:"memoryPoolFlushTime" default:"1m0s"` // How often internal memory buffer pools will be flushed. (no longer used)
+    MemoryPoolUseMmap bool `json:"memoryPoolUseMmap" default:"false"` // Whether to use mmap buffers in internal memory pool. (no longer used)
     DisableHttp2 bool `json:"disableHttp2" default:"false"` // Disable usage of http2 for S3 backends.
     DownloadUrl string `json:"downloadUrl"` // Custom endpoint for downloads.
+    DirectoryMarkers bool `json:"directoryMarkers" default:"false"` // Upload an empty object with a trailing slash when a new directory is created
     UseMultipartEtag string `json:"useMultipartEtag" default:"unset"` // Whether to use ETag in multipart uploads for verification
+    UseUnsignedPayload string `json:"useUnsignedPayload" default:"unset"` // Whether to use an unsigned payload in PutObject
     UsePresignedRequest bool `json:"usePresignedRequest" default:"false"` // Whether to use a presigned request or PutObject for single part uploads
     Versions bool `json:"versions" default:"false"` // Include old versions in directory listings.
     VersionAt string `json:"versionAt" default:"off"` // Show file versions as they were at the specified time.
+    VersionDeleted bool `json:"versionDeleted" default:"false"` // Show deleted file markers when using versions.
     Decompress bool `json:"decompress" default:"false"` // If set this will decompress gzip encoded objects.
     MightGzip string `json:"mightGzip" default:"unset"` // Set this if the backend might gzip objects.
+    UseAcceptEncodingGzip string `json:"useAcceptEncodingGzip" default:"unset"` // Whether to send `Accept-Encoding: gzip` header.
     NoSystemMetadata bool `json:"noSystemMetadata" default:"false"` // Suppress setting and reading of system metadata
+    UseAlreadyExists string `json:"useAlreadyExists" default:"unset"` // Set if rclone should report BucketAlreadyExists errors on bucket creation.
+    UseMultipartUploads string `json:"useMultipartUploads" default:"unset"` // Set if rclone should use multipart uploads.
+    SdkLogMode string `json:"sdkLogMode" default:"Off"` // Set to debug the SDK
+    Description string `json:"description"` // Description of the remote.
 }

 type createS3WasabiStorageRequest struct {
@@ -2672,6 +3471,7 @@ type seafileConfig struct {
     CreateLibrary bool `json:"createLibrary" default:"false"` // Should rclone create a library if it doesn't exist.
     AuthToken string `json:"authToken"` // Authentication token.
     Encoding string `json:"encoding" default:"Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8"` // The encoding for the backend.
+    Description string `json:"description"` // Description of the remote.
 }

 type createSeafileStorageRequest struct {
@@ -2721,10 +3521,16 @@ type sftpConfig struct {
     IdleTimeout string `json:"idleTimeout" default:"1m0s"` // Max time before closing idle connections.
     ChunkSize string `json:"chunkSize" default:"32Ki"` // Upload and download chunk size.
     Concurrency int `json:"concurrency" default:"64"` // The maximum number of outstanding requests for one file
+    Connections int `json:"connections" default:"0"` // Maximum number of SFTP simultaneous connections, 0 for unlimited.
     SetEnv string `json:"setEnv"` // Environment variables to pass to sftp and commands
     Ciphers string `json:"ciphers"` // Space separated list of ciphers to be used for session encryption, ordered by preference.
     KeyExchange string `json:"keyExchange"` // Space separated list of key exchange algorithms, ordered by preference.
     Macs string `json:"macs"` // Space separated list of MACs (message authentication code) algorithms, ordered by preference.
+    HostKeyAlgorithms string `json:"hostKeyAlgorithms"` // Space separated list of host key algorithms, ordered by preference.
+    Ssh string `json:"ssh"` // Path and arguments to external ssh binary.
+    SocksProxy string `json:"socksProxy"` // Socks 5 proxy host.
+    CopyIsHardlink bool `json:"copyIsHardlink" default:"false"` // Set to enable server side copies using hardlinks.
+    Description string `json:"description"` // Description of the remote.
 }

 type createSftpStorageRequest struct {
@@ -2747,11 +3553,17 @@ type createSftpStorageRequest struct {
 // @Router /storage/sftp [post]
 func createSftpStorage() {}

 type sharefileConfig struct {
+    ClientId string `json:"clientId"` // OAuth Client Id.
+    ClientSecret string `json:"clientSecret"` // OAuth Client Secret.
+    Token string `json:"token"` // OAuth Access Token as a JSON blob.
+    AuthUrl string `json:"authUrl"` // Auth server URL.
+    TokenUrl string `json:"tokenUrl"` // Token server url.
     UploadCutoff string `json:"uploadCutoff" default:"128Mi"` // Cutoff for switching to multipart upload.
     RootFolderId string `json:"rootFolderId" example:""` // ID of the root folder.
     ChunkSize string `json:"chunkSize" default:"64Mi"` // Upload chunk size.
     Endpoint string `json:"endpoint"` // Endpoint for API calls.
     Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot"` // The encoding for the backend.
+    Description string `json:"description"` // Description of the remote.
 }

 type createSharefileStorageRequest struct {
@@ -2778,6 +3590,7 @@ type siaConfig struct {
     ApiPassword string `json:"apiPassword"` // Sia Daemon API Password.
     UserAgent string `json:"userAgent" default:"Sia-Agent"` // Siad User Agent
     Encoding string `json:"encoding" default:"Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend.
+    Description string `json:"description"` // Description of the remote.
 }

 type createSiaStorageRequest struct {
@@ -2810,6 +3623,7 @@ type smbConfig struct {
     HideSpecialShare bool `json:"hideSpecialShare" default:"true"` // Hide special shares (e.g. print$) which users aren't supposed to access.
     CaseInsensitive bool `json:"caseInsensitive" default:"true"` // Whether the server is configured to be case-insensitive.
     Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot"` // The encoding for the backend.
+    Description string `json:"description"` // Description of the remote.
 }

 type createSmbStorageRequest struct {
@@ -2833,6 +3647,7 @@ func createSmbStorage() {}
 type storjExistingConfig struct {
     AccessGrant string `json:"accessGrant"` // Access grant.
+    Description string `json:"description"` // Description of the remote.
 }

 type createStorjExistingStorageRequest struct {
@@ -2858,6 +3673,7 @@ type storjNewConfig struct {
     SatelliteAddress string `json:"satelliteAddress" default:"us1.storj.io" example:"us1.storj.io"` // Satellite address.
     ApiKey string `json:"apiKey"` // API key.
     Passphrase string `json:"passphrase"` // Encryption passphrase.
+    Description string `json:"description"` // Description of the remote.
 }

 type createStorjNewStorageRequest struct {
@@ -2891,6 +3707,7 @@ type sugarsyncConfig struct {
     RootId string `json:"rootId"` // Sugarsync root id.
     DeletedId string `json:"deletedId"` // Sugarsync deleted folder id.
     Encoding string `json:"encoding" default:"Slash,Ctl,InvalidUtf8,Dot"` // The encoding for the backend.
+    Description string `json:"description"` // Description of the remote.
 }

 type createSugarsyncStorageRequest struct {
@@ -2932,10 +3749,14 @@ type swiftConfig struct {
     EndpointType string `json:"endpointType" default:"public" example:"public"` // Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE).
     LeavePartsOnError bool `json:"leavePartsOnError" default:"false"` // If true avoid calling abort upload on a failure.
     StoragePolicy string `json:"storagePolicy" example:""` // The storage policy to use when creating a new container.
-    ChunkSize string `json:"chunkSize" default:"5Gi"` // Above this size files will be chunked into a _segments container.
+    FetchUntilEmptyPage bool `json:"fetchUntilEmptyPage" default:"false"` // When paginating, always fetch unless we received an empty page.
+    PartialPageFetchThreshold int `json:"partialPageFetchThreshold" default:"0"` // When paginating, fetch if the current page is within this percentage of the limit.
+    ChunkSize string `json:"chunkSize" default:"5Gi"` // Above this size files will be chunked.
     NoChunk bool `json:"noChunk" default:"false"` // Don't chunk files during streaming upload.
     NoLargeObjects bool `json:"noLargeObjects" default:"false"` // Disable support for static and dynamic large objects
+    UseSegmentsContainer string `json:"useSegmentsContainer" default:"unset"` // Choose destination for large object segments
     Encoding string `json:"encoding" default:"Slash,InvalidUtf8"` // The encoding for the backend.
+    Description string `json:"description"` // Description of the remote.
 }

 type createSwiftStorageRequest struct {
@@ -2964,6 +3785,7 @@ type unionConfig struct {
     SearchPolicy string `json:"searchPolicy" default:"ff"` // Policy to choose upstream on SEARCH category.
     CacheTime int `json:"cacheTime" default:"120"` // Cache time of usage and free space (in seconds).
     MinFreeSpace string `json:"minFreeSpace" default:"1Gi"` // Minimum viable free space for lfs/eplfs policies.
+    Description string `json:"description"` // Description of the remote.
 }

 type createUnionStorageRequest struct {
@@ -2987,7 +3809,9 @@ func createUnionStorage() {}
 type uptoboxConfig struct {
     AccessToken string `json:"accessToken"` // Your access token.
+    Private bool `json:"private" default:"false"` // Set to make uploaded files private
     Encoding string `json:"encoding" default:"Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot"` // The encoding for the backend.
+    Description string `json:"description"` // Description of the remote.
 }

 type createUptoboxStorageRequest struct {
@@ -3010,14 +3834,20 @@ type createUptoboxStorageRequest struct {
 // @Router /storage/uptobox [post]
 func createUptoboxStorage() {}

 type webdavConfig struct {
-    Url string `json:"url"` // URL of http host to connect to.
-    Vendor string `json:"vendor" example:"nextcloud"` // Name of the WebDAV site/service/software you are using.
-    User string `json:"user"` // User name.
-    Pass string `json:"pass"` // Password.
-    BearerToken string `json:"bearerToken"` // Bearer token instead of user/pass (e.g. a Macaroon).
-    BearerTokenCommand string `json:"bearerTokenCommand"` // Command to run to get a bearer token.
-    Encoding string `json:"encoding"` // The encoding for the backend.
-    Headers string `json:"headers"` // Set HTTP headers for all transactions.
+    Url string `json:"url"` // URL of http host to connect to.
+    Vendor string `json:"vendor" example:"fastmail"` // Name of the WebDAV site/service/software you are using.
+    User string `json:"user"` // User name.
+    Pass string `json:"pass"` // Password.
+    BearerToken string `json:"bearerToken"` // Bearer token instead of user/pass (e.g. a Macaroon).
+    BearerTokenCommand string `json:"bearerTokenCommand"` // Command to run to get a bearer token.
+    Encoding string `json:"encoding"` // The encoding for the backend.
+    Headers string `json:"headers"` // Set HTTP headers for all transactions.
+    PacerMinSleep string `json:"pacerMinSleep" default:"10ms"` // Minimum time to sleep between API calls.
+    NextcloudChunkSize string `json:"nextcloudChunkSize" default:"10Mi"` // Nextcloud upload chunk size.
+    OwncloudExcludeShares bool `json:"owncloudExcludeShares" default:"false"` // Exclude ownCloud shares
+    OwncloudExcludeMounts bool `json:"owncloudExcludeMounts" default:"false"` // Exclude ownCloud mounted storages
+    UnixSocket string `json:"unixSocket"` // Path to a unix domain socket to dial to, instead of opening a TCP connection directly
+    Description string `json:"description"` // Description of the remote.
 }

 type createWebdavStorageRequest struct {
@@ -3047,6 +3877,8 @@ type yandexConfig struct {
     TokenUrl string `json:"tokenUrl"` // Token server url.
     HardDelete bool `json:"hardDelete" default:"false"` // Delete files permanently rather than putting them into the trash.
     Encoding string `json:"encoding" default:"Slash,Del,Ctl,InvalidUtf8,Dot"` // The encoding for the backend.
+    SpoofUa bool `json:"spoofUa" default:"true"` // Set the user agent to match an official version of the yandex disk client. May help with upload performance.
+    Description string `json:"description"` // Description of the remote.
 }

 type createYandexStorageRequest struct {
@@ -3076,6 +3908,7 @@ type zohoConfig struct {
     TokenUrl string `json:"tokenUrl"` // Token server url.
     Region string `json:"region" example:"com"` // Zoho region to connect to.
     Encoding string `json:"encoding" default:"Del,Ctl,InvalidUtf8"` // The encoding for the backend.
+    Description string `json:"description"` // Description of the remote.
 }

 type createZohoStorageRequest struct {