From 57215af8bd300649948a7fe0694a3704376d7e93 Mon Sep 17 00:00:00 2001
From: Zunli Hu
Date: Mon, 12 Oct 2020 13:43:08 +0800
Subject: [PATCH] {0.5.0b1} Add new api support (#39)

* add new SDK

* add new SDK

* add release note

Co-authored-by: Ubuntu
---
 README.rst | 10 +
 .../storagev2/blob/v2020_02_10/__init__.py | 229 ++
 .../blob/v2020_02_10/_blob_client.py | 3581 +++++++++++++++++
 .../blob/v2020_02_10/_blob_service_client.py | 686 ++++
 .../blob/v2020_02_10/_container_client.py | 1448 +++++++
 .../blob/v2020_02_10/_deserialize.py | 158 +
 .../storagev2/blob/v2020_02_10/_download.py | 580 +++
 .../blob/v2020_02_10/_generated/__init__.py | 18 +
 .../_generated/_azure_blob_storage.py | 83 +
 .../v2020_02_10/_generated/_configuration.py | 52 +
 .../v2020_02_10/_generated/aio/__init__.py | 13 +
 .../aio/_azure_blob_storage_async.py | 84 +
 .../_generated/aio/_configuration_async.py | 53 +
 .../aio/operations_async/__init__.py | 28 +
 .../_append_blob_operations_async.py | 694 ++++
 .../_blob_operations_async.py | 3067 ++++++++++++++
 .../_block_blob_operations_async.py | 833 ++++
 .../_container_operations_async.py | 1400 +++++++
 .../_directory_operations_async.py | 739 ++++
 .../_page_blob_operations_async.py | 1399 +++++++
 .../_service_operations_async.py | 664 +++
 .../v2020_02_10/_generated/models/__init__.py | 229 ++
 .../models/_azure_blob_storage_enums.py | 343 ++
 .../v2020_02_10/_generated/models/_models.py | 2009 +++++++++
 .../_generated/models/_models_py3.py | 2009 +++++++++
 .../_generated/operations/__init__.py | 28 +
 .../operations/_append_blob_operations.py | 694 ++++
 .../_generated/operations/_blob_operations.py | 3065 ++++++++++++++
 .../operations/_block_blob_operations.py | 833 ++++
 .../operations/_container_operations.py | 1400 +++++++
 .../operations/_directory_operations.py | 739 ++++
 .../operations/_page_blob_operations.py | 1399 +++++++
 .../operations/_service_operations.py | 663 +++
 .../blob/v2020_02_10/_generated/version.py | 13 +
 .../storagev2/blob/v2020_02_10/_lease.py | 331 ++
 .../blob/v2020_02_10/_list_blobs_helper.py | 166 +
 .../storagev2/blob/v2020_02_10/_models.py | 1173 ++++++
 .../blob/v2020_02_10/_quick_query_helper.py | 196 +
 .../storagev2/blob/v2020_02_10/_serialize.py | 196 +
 .../blob/v2020_02_10/_shared/__init__.py | 56 +
 .../v2020_02_10/_shared/authentication.py | 142 +
 .../blob/v2020_02_10/_shared/avro/__init__.py | 5 +
 .../blob/v2020_02_10/_shared/avro/avro_io.py | 464 +++
 .../v2020_02_10/_shared/avro/avro_io_async.py | 448 +++
 .../blob/v2020_02_10/_shared/avro/datafile.py | 266 ++
 .../_shared/avro/datafile_async.py | 215 +
 .../blob/v2020_02_10/_shared/avro/schema.py | 1221 ++++++
 .../blob/v2020_02_10/_shared/base_client.py | 443 ++
 .../v2020_02_10/_shared/base_client_async.py | 185 +
 .../blob/v2020_02_10/_shared/constants.py | 27 +
 .../blob/v2020_02_10/_shared/encryption.py | 542 +++
 .../blob/v2020_02_10/_shared/models.py | 466 +++
 .../blob/v2020_02_10/_shared/parser.py | 20 +
 .../blob/v2020_02_10/_shared/policies.py | 610 +++
 .../v2020_02_10/_shared/policies_async.py | 220 +
 .../v2020_02_10/_shared/request_handlers.py | 147 +
 .../v2020_02_10/_shared/response_handlers.py | 159 +
 .../_shared/shared_access_signature.py | 220 +
 .../blob/v2020_02_10/_shared/uploads.py | 550 +++
 .../blob/v2020_02_10/_shared/uploads_async.py | 350 ++
 .../v2020_02_10/_shared_access_signature.py | 596 +++
 .../blob/v2020_02_10/_upload_helpers.py | 291 ++
 .../storagev2/blob/v2020_02_10/_version.py | 7 +
 .../blob/v2020_02_10/aio/__init__.py | 137 +
 .../v2020_02_10/aio/_blob_client_async.py | 2335 +++++++++++
 .../aio/_blob_service_client_async.py | 641 +++
 .../aio/_container_client_async.py | 1121 ++++++
 .../blob/v2020_02_10/aio/_download_async.py | 491 +++
 .../blob/v2020_02_10/aio/_lease_async.py | 327 ++
 .../v2020_02_10/aio/_list_blobs_helper.py | 162 +
 .../storagev2/blob/v2020_02_10/aio/_models.py | 141 +
 .../blob/v2020_02_10/aio/_upload_helpers.py | 266 ++
 .../filedatalake/v2020_02_10/__init__.py | 93 +
 .../_data_lake_directory_client.py | 539 +++
 .../v2020_02_10/_data_lake_file_client.py | 753 ++++
 .../v2020_02_10/_data_lake_lease.py | 245 ++
 .../v2020_02_10/_data_lake_service_client.py | 421 ++
 .../filedatalake/v2020_02_10/_deserialize.py | 146 +
 .../filedatalake/v2020_02_10/_download.py | 52 +
 .../v2020_02_10/_file_system_client.py | 783 ++++
 .../v2020_02_10/_generated/__init__.py | 18 +
 .../v2020_02_10/_generated/_configuration.py | 62 +
 .../_generated/_data_lake_storage_client.py | 67 +
 .../v2020_02_10/_generated/aio/__init__.py | 13 +
 .../_generated/aio/_configuration_async.py | 63 +
 .../aio/_data_lake_storage_client_async.py | 68 +
 .../aio/operations_async/__init__.py | 20 +
 .../_file_system_operations_async.py | 462 +++
 .../_path_operations_async.py | 1698 ++++++++
 .../_service_operations_async.py | 128 +
 .../v2020_02_10/_generated/models/__init__.py | 68 +
 .../models/_data_lake_storage_client_enums.py | 63 +
 .../v2020_02_10/_generated/models/_models.py | 350 ++
 .../_generated/models/_models_py3.py | 350 ++
 .../_generated/operations/__init__.py | 20 +
 .../operations/_file_system_operations.py | 462 +++
 .../_generated/operations/_path_operations.py | 1697 ++++++++
 .../operations/_service_operations.py | 128 +
 .../v2020_02_10/_generated/version.py | 13 +
 .../v2020_02_10/_list_paths_helper.py | 73 +
 .../filedatalake/v2020_02_10/_models.py | 837 ++++
 .../filedatalake/v2020_02_10/_path_client.py | 872 ++++
 .../v2020_02_10/_quick_query_helper.py | 71 +
 .../filedatalake/v2020_02_10/_serialize.py | 89 +
 .../v2020_02_10/_shared/__init__.py | 56 +
 .../v2020_02_10/_shared/authentication.py | 142 +
 .../v2020_02_10/_shared/base_client.py | 437 ++
 .../v2020_02_10/_shared/base_client_async.py | 179 +
 .../v2020_02_10/_shared/constants.py | 26 +
 .../v2020_02_10/_shared/encryption.py | 542 +++
 .../v2020_02_10/_shared/models.py | 468 +++
 .../v2020_02_10/_shared/parser.py | 20 +
 .../v2020_02_10/_shared/policies.py | 610 +++
 .../v2020_02_10/_shared/policies_async.py | 220 +
 .../v2020_02_10/_shared/request_handlers.py | 147 +
 .../v2020_02_10/_shared/response_handlers.py | 159 +
 .../_shared/shared_access_signature.py | 220 +
 .../v2020_02_10/_shared/uploads.py | 568 +++
 .../v2020_02_10/_shared/uploads_async.py | 367 ++
 .../v2020_02_10/_shared_access_signature.py | 391 ++
 .../v2020_02_10/_upload_helper.py | 87 +
 .../filedatalake/v2020_02_10/_version.py | 7 +
 .../filedatalake/v2020_02_10/aio/__init__.py | 24 +
 .../aio/_data_lake_directory_client_async.py | 526 +++
 .../aio/_data_lake_file_client_async.py | 553 +++
 .../v2020_02_10/aio/_data_lake_lease_async.py | 243 ++
 .../aio/_data_lake_service_client_async.py | 372 ++
 .../v2020_02_10/aio/_download_async.py | 52 +
 .../aio/_file_system_client_async.py | 745 ++++
 .../filedatalake/v2020_02_10/aio/_models.py | 110 +
 .../v2020_02_10/aio/_path_client_async.py | 703 ++++
 .../v2020_02_10/aio/_upload_helper.py | 87 +
 .../fileshare/v2020_02_10/__init__.py | 74 +
 .../fileshare/v2020_02_10/_deserialize.py | 83 +
 .../v2020_02_10/_directory_client.py | 706 ++++
 .../fileshare/v2020_02_10/_download.py | 522 +++
 .../fileshare/v2020_02_10/_file_client.py | 1395 +++++++
 .../v2020_02_10/_generated/__init__.py | 18 +
 .../_generated/_azure_file_storage.py | 71 +
 .../v2020_02_10/_generated/_configuration.py | 58 +
 .../v2020_02_10/_generated/aio/__init__.py | 13 +
 .../aio/_azure_file_storage_async.py | 72 +
 .../_generated/aio/_configuration_async.py | 59 +
 .../aio/operations_async/__init__.py | 22 +
 .../_directory_operations_async.py | 672 ++++
 .../_file_operations_async.py | 1671 ++++++++
 .../_service_operations_async.py | 253 ++
 .../_share_operations_async.py | 1315 ++++++
 .../v2020_02_10/_generated/models/__init__.py | 123 +
 .../models/_azure_file_storage_enums.py | 135 +
 .../v2020_02_10/_generated/models/_models.py | 1021 +++++
 .../_generated/models/_models_py3.py | 1021 +++++
 .../_generated/operations/__init__.py | 22 +
 .../operations/_directory_operations.py | 672 ++++
 .../_generated/operations/_file_operations.py | 1670 ++++++++
 .../operations/_service_operations.py | 253 ++
 .../operations/_share_operations.py | 1315 ++++++
 .../v2020_02_10/_generated/version.py | 13 +
 .../storagev2/fileshare/v2020_02_10/_lease.py | 237 ++
 .../fileshare/v2020_02_10/_models.py | 965 +++++
 .../fileshare/v2020_02_10/_parser.py | 42 +
 .../fileshare/v2020_02_10/_serialize.py | 113 +
 .../fileshare/v2020_02_10/_share_client.py | 806 ++++
 .../v2020_02_10/_share_service_client.py | 415 ++
 .../fileshare/v2020_02_10/_shared/__init__.py | 56 +
 .../v2020_02_10/_shared/authentication.py | 142 +
 .../v2020_02_10/_shared/base_client.py | 437 ++
 .../v2020_02_10/_shared/base_client_async.py | 179 +
 .../v2020_02_10/_shared/constants.py | 26 +
 .../v2020_02_10/_shared/encryption.py | 542 +++
 .../fileshare/v2020_02_10/_shared/models.py | 468 +++
 .../fileshare/v2020_02_10/_shared/parser.py | 20 +
 .../fileshare/v2020_02_10/_shared/policies.py | 610 +++
 .../v2020_02_10/_shared/policies_async.py | 220 +
 .../v2020_02_10/_shared/request_handlers.py | 147 +
 .../v2020_02_10/_shared/response_handlers.py | 159 +
 .../_shared/shared_access_signature.py | 220 +
 .../fileshare/v2020_02_10/_shared/uploads.py | 550 +++
 .../v2020_02_10/_shared/uploads_async.py | 351 ++
 .../v2020_02_10/_shared_access_signature.py | 491 +++
 .../fileshare/v2020_02_10/_version.py | 7 +
 .../fileshare/v2020_02_10/aio/__init__.py | 20 +
 .../aio/_directory_client_async.py | 593 +++
 .../v2020_02_10/aio/_download_async.py | 467 +++
 .../v2020_02_10/aio/_file_client_async.py | 1197 ++++++
 .../fileshare/v2020_02_10/aio/_lease_async.py | 229 ++
 .../fileshare/v2020_02_10/aio/_models.py | 178 +
 .../v2020_02_10/aio/_share_client_async.py | 664 +++
 .../aio/_share_service_client_async.py | 368 ++
 .../storagev2/queue/v2018_03_28/_models.py | 2 +-
 .../queue/v2018_03_28/_queue_client.py | 2 +-
 .../v2018_03_28/_shared/authentication.py | 18 +-
 .../queue/v2018_03_28/_shared/base_client.py | 17 +-
 .../v2018_03_28/_shared/base_client_async.py | 2 +
 .../queue/v2018_03_28/_shared/models.py | 34 +-
 .../queue/v2018_03_28/_shared/policies.py | 2 +-
 .../v2018_03_28/_shared/policies_async.py | 1 +
 .../queue/v2018_03_28/_shared/uploads.py | 6 +-
 .../storagev2/queue/v2018_03_28/_version.py | 2 +-
 .../v2018_03_28/aio/_queue_client_async.py | 1 +
 .../aio/_queue_service_client_async.py | 1 +
 setup.py | 2 +-
 202 files changed, 87594 insertions(+), 25 deletions(-)
diff --git a/README.rst b/README.rst
index c8ee2f4..f400594 100644
--- a/README.rst
+++ b/README.rst
@@ -17,6 +17,16 @@ Handles multi-API versions of Azure Storage Data Plane originally from https://g
 Change Log
 ----------
 
+0.5.0
++++++
+* Add new api support for azure-multiapi-storagev2:
+    - blob
+        - v2020-02-10
+    - filedatalake
+        - v2020-02-10
+    - fileshare
+        - v2020-02-10
+
 0.4.1
 +++++
 * Add tags support for blob
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/__init__.py
new file mode 100644
index 0000000..937d74b
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/__init__.py
@@ -0,0 +1,229 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import os
+
+from typing import Union, Iterable, AnyStr, IO, Any, Dict  # pylint: disable=unused-import
+from ._version import VERSION
+from ._blob_client import BlobClient
+from ._container_client import ContainerClient
+from ._blob_service_client import BlobServiceClient
+from ._lease import BlobLeaseClient
+from ._download import StorageStreamDownloader
+from ._quick_query_helper import BlobQueryReader
+from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas
+from ._shared.policies import ExponentialRetry, LinearRetry
+from ._shared.response_handlers import PartialBatchErrorException
+from ._shared.models import (
+    LocationMode,
+    ResourceTypes,
+    AccountSasPermissions,
+    StorageErrorCode,
+    UserDelegationKey
+)
+from ._generated.models import (
+    RehydratePriority
+)
+from ._models import (
+    BlobType,
+    BlockState,
+    StandardBlobTier,
+    PremiumPageBlobTier,
+    SequenceNumberAction,
+    PublicAccess,
+    BlobAnalyticsLogging,
+    Metrics,
+    RetentionPolicy,
+    StaticWebsite,
+    CorsRule,
+    ContainerProperties,
+    BlobProperties,
+    FilteredBlob,
+    LeaseProperties,
+    ContentSettings,
+    CopyProperties,
+    BlobBlock,
+    PageRange,
+    AccessPolicy,
+    ContainerSasPermissions,
+    BlobSasPermissions,
+    CustomerProvidedEncryptionKey,
+    ContainerEncryptionScope,
+    BlobQueryError,
+    DelimitedJsonDialect,
+    DelimitedTextDialect,
+    ArrowDialect,
+    ArrowType,
+    ObjectReplicationPolicy,
+    ObjectReplicationRule
+)
+from ._list_blobs_helper import BlobPrefix
+
+__version__ = VERSION
+
+
+def upload_blob_to_url(
+        blob_url,  # type: str
+        data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
+        credential=None,  # type: Any
+        **kwargs):
+    # type: (...) -> Dict[str, Any]
+    """Upload data to a given URL.
+
+    The data will be uploaded as a block blob.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param data:
+        The data to upload. This can be bytes, text, an iterable or a file-like object.
+    :type data: bytes or str or Iterable
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token. The value can be a SAS token string, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the URL already has a SAS token, specifying an explicit credential will take priority.
+    :keyword bool overwrite:
+        Whether the blob to be uploaded should overwrite the current data.
+        If True, upload_blob_to_url will overwrite any existing data. If set to False, the
+        operation will fail with a ResourceExistsError.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to upload.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword dict(str,str) metadata:
+        Name-value pairs associated with the blob as metadata.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https, as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient upload algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :keyword str encoding:
+        Encoding to use if text is supplied as input. Defaults to UTF-8.
+    :returns: Blob-updated property dict (Etag and last modified)
+    :rtype: dict(str, Any)
+    """
+    with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs)
+
+
+def _download_to_stream(client, handle, **kwargs):
+    """Download data to the specified open file-handle."""
+    stream = client.download_blob(**kwargs)
+    stream.readinto(handle)
+
+
+def download_blob_from_url(
+        blob_url,  # type: str
+        output,  # type: str
+        credential=None,  # type: Any
+        **kwargs):
+    # type: (...) -> None
+    """Download the contents of a blob to a local file or stream.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param output:
+        Where the data should be downloaded to. This could be either a file path to write to,
+        or an open IO handle to write to.
+    :type output: str or writable stream
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the URL already has a SAS token, specifying an explicit credential will take priority.
+    :keyword bool overwrite:
+        Whether the local file should be overwritten if it already exists. The default value is
+        `False` - in which case a ValueError will be raised if the file already exists. If set to
+        `True`, an attempt will be made to write to the existing file. If a stream handle is passed
+        in, this value is ignored.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to download.
+    :keyword int offset:
+        Start of byte range to use for downloading a section of the blob.
+        Must be set if length is provided.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https, as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient upload algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :rtype: None
+    """
+    overwrite = kwargs.pop('overwrite', False)
+    with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        if hasattr(output, 'write'):
+            _download_to_stream(client, output, **kwargs)
+        else:
+            if not overwrite and os.path.isfile(output):
+                raise ValueError("The file '{}' already exists.".format(output))
+            with open(output, 'wb') as file_handle:
+                _download_to_stream(client, file_handle, **kwargs)
+
+
+__all__ = [
+    'upload_blob_to_url',
+    'download_blob_from_url',
+    'BlobServiceClient',
+    'ContainerClient',
+    'BlobClient',
+    'BlobType',
+    'BlobLeaseClient',
+    'StorageErrorCode',
+    'UserDelegationKey',
+    'ExponentialRetry',
+    'LinearRetry',
+    'LocationMode',
+    'BlockState',
+    'StandardBlobTier',
+    'PremiumPageBlobTier',
+    'SequenceNumberAction',
+    'PublicAccess',
+    'BlobAnalyticsLogging',
+    'Metrics',
+    'RetentionPolicy',
+    'StaticWebsite',
+    'CorsRule',
+    'ContainerProperties',
+    'BlobProperties',
+    'BlobPrefix',
+    'FilteredBlob',
+    'LeaseProperties',
+    'ContentSettings',
+    'CopyProperties',
+    'BlobBlock',
+    'PageRange',
+    'AccessPolicy',
+    'ContainerSasPermissions',
+    'BlobSasPermissions',
+    'ResourceTypes',
+    'AccountSasPermissions',
+    'StorageStreamDownloader',
+    'CustomerProvidedEncryptionKey',
+    'RehydratePriority',
+    'generate_account_sas',
+    'generate_container_sas',
+    'generate_blob_sas',
+    'PartialBatchErrorException',
+    'ContainerEncryptionScope',
+    'BlobQueryError',
+    'DelimitedJsonDialect',
+    'DelimitedTextDialect',
+    'ArrowDialect',
+    'ArrowType',
+    'BlobQueryReader',
+    'ObjectReplicationPolicy',
+    'ObjectReplicationRule'
+]
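For orientation, here is a minimal usage sketch of the two module-level helpers added above. The account URL, container, blob name, and SAS token are placeholders, not values from this patch:

from azure.multiapi.storagev2.blob.v2020_02_10 import (
    upload_blob_to_url,
    download_blob_from_url,
)

# Hypothetical SAS URL; any blob URL carrying a valid SAS token works the same way.
sas_url = "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas-token>"

# Upload bytes as a block blob, replacing any existing data.
upload_blob_to_url(sas_url, data=b"hello world", overwrite=True)

# Download the same blob into a local file; without overwrite=True this
# raises ValueError if the file already exists.
download_blob_from_url(sas_url, output="hello.txt", overwrite=True)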
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_blob_client.py b/azure/multiapi/storagev2/blob/v2020_02_10/_blob_client.py
new file mode 100644
index 0000000..7daace2
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_blob_client.py
@@ -0,0 +1,3581 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines,no-self-use
+
+from io import BytesIO
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
+    TYPE_CHECKING
+)
+try:
+    from urllib.parse import urlparse, quote, unquote
+except ImportError:
+    from urlparse import urlparse  # type: ignore
+    from urllib2 import quote, unquote  # type: ignore
+
+import six
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.exceptions import ResourceNotFoundError
+
+from ._shared import encode_base64
+from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query
+from ._shared.encryption import generate_blob_encryption_data
+from ._shared.uploads import IterStreamer
+from ._shared.request_handlers import (
+    add_metadata_headers, get_length, read_length,
+    validate_and_format_range_headers)
+from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized
+from ._generated import AzureBlobStorage, VERSION
+from ._generated.models import (  # pylint: disable=unused-import
+    DeleteSnapshotsOptionType,
+    BlobHTTPHeaders,
+    BlockLookupList,
+    AppendPositionAccessConditions,
+    SequenceNumberAccessConditions,
+    StorageErrorException,
+    QueryRequest,
+    CpkInfo)
+from ._serialize import (
+    get_modify_conditions,
+    get_source_conditions,
+    get_cpk_scope_info,
+    get_api_version,
+    serialize_blob_tags_header,
+    serialize_blob_tags,
+    serialize_query_format, get_access_conditions
+)
+from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags
+from ._quick_query_helper import BlobQueryReader
+from ._upload_helpers import (
+    upload_block_blob,
+    upload_append_blob,
+    upload_page_blob)
+from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError
+from ._download import StorageStreamDownloader
+from ._lease import BlobLeaseClient
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from ._generated.models import BlockList
+    from ._models import (  # pylint: disable=unused-import
+        ContentSettings,
+        PremiumPageBlobTier,
+        StandardBlobTier,
+        SequenceNumberAction
+    )
+
+_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
+    'The require_encryption flag is set, but encryption is not supported'
+    ' for this method.')
+
+
+class BlobClient(StorageAccountHostsMixin):  # pylint: disable=too-many-public-methods
+    """A client to interact with a specific blob, although that blob may not yet exist.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the blob,
+        use the :func:`from_blob_url` classmethod.
+    :param container_name: The container name for the blob.
+    :type container_name: str
+    :param blob_name: The name of the blob with which to interact. If specified, this value will override
+        a blob value specified in the blob URL.
+    :type blob_name: str
+    :param str snapshot:
+        The optional blob snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the URL already has a SAS token, specifying an explicit credential will take priority.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is '2019-07-07'.
+        Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, the blob will be
+        uploaded with a single HTTP PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        anything beyond this will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_blob_client]
+            :end-before: [END create_blob_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobClient from a URL to a public blob (no auth needed).
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_blob_client_sas_url]
+            :end-before: [END create_blob_client_sas_url]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobClient from a SAS URL to a blob.
+    """
+    def __init__(
+            self, account_url,  # type: str
+            container_name,  # type: str
+            blob_name,  # type: str
+            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError:
+            raise ValueError("Account URL must be a string.")
+        parsed_url = urlparse(account_url.rstrip('/'))
+
+        if not (container_name and blob_name):
+            raise ValueError("Please specify a container name and blob name.")
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(account_url))
+
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+
+        self.container_name = container_name
+        self.blob_name = blob_name
+        try:
+            self.snapshot = snapshot.snapshot  # type: ignore
+        except AttributeError:
+            try:
+                self.snapshot = snapshot['snapshot']  # type: ignore
+            except TypeError:
+                self.snapshot = snapshot or path_snapshot
+
+        self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot)
+        super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
+
+    def _format_url(self, hostname):
+        container_name = self.container_name
+        if isinstance(container_name, six.text_type):
+            container_name = container_name.encode('UTF-8')
+        return "{}://{}/{}/{}{}".format(
+            self.scheme,
+            hostname,
+            quote(container_name),
+            quote(self.blob_name, safe='~/'),
+            self._query_str)
+
+    def _encode_source_url(self, source_url):
+        parsed_source_url = urlparse(source_url)
+        source_scheme = parsed_source_url.scheme
+        source_hostname = parsed_source_url.netloc.rstrip('/')
+        source_path = unquote(parsed_source_url.path)
+        source_query = parsed_source_url.query
+        result = ["{}://{}{}".format(source_scheme, source_hostname, quote(source_path, safe='~/'))]
+        if source_query:
+            result.append(source_query)
+        return '?'.join(result)
+
+    @classmethod
+    def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs):
+        # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient
+        """Create BlobClient from a blob URL. This doesn't support customized blob URLs with '/' in the blob name.
+
+        :param str blob_url:
+            The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be
+            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+        :type blob_url: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`. If specified, this will override
+            the snapshot in the URL.
+        :returns: A Blob client.
+        :rtype: ~azure.storage.blob.BlobClient
+        """
+        try:
+            if not blob_url.lower().startswith('http'):
+                blob_url = "https://" + blob_url
+        except AttributeError:
+            raise ValueError("Blob URL must be a string.")
+        parsed_url = urlparse(blob_url.rstrip('/'))
+
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(blob_url))
+
+        account_path = ""
+        if ".core." in parsed_url.netloc:
+            # .core. indicates a non-customized URL. A blob name with directory info can also be parsed.
+            path_blob = parsed_url.path.lstrip('/').split('/', 1)
+        elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc:
+            path_blob = parsed_url.path.lstrip('/').split('/', 2)
+            account_path += path_blob[0]
+        else:
+            # For a customized URL, a blob name that has directory info cannot be parsed.
+            path_blob = parsed_url.path.lstrip('/').split('/')
+            if len(path_blob) > 2:
+                account_path = "/" + "/".join(path_blob[:-2])
+        account_url = "{}://{}{}?{}".format(
+            parsed_url.scheme,
+            parsed_url.netloc.rstrip('/'),
+            account_path,
+            parsed_url.query)
+        container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1])
+        if not container_name or not blob_name:
+            raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.")
+
+        path_snapshot, _ = parse_query(parsed_url.query)
+        if snapshot:
+            try:
+                path_snapshot = snapshot.snapshot  # type: ignore
+            except AttributeError:
+                try:
+                    path_snapshot = snapshot['snapshot']  # type: ignore
+                except TypeError:
+                    path_snapshot = snapshot
+
+        return cls(
+            account_url, container_name=container_name, blob_name=blob_name,
+            snapshot=path_snapshot, credential=credential, **kwargs
+        )
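# A rough illustration of the URL parsing above; written as Python comments and
# code since it sits inside the _blob_client.py diff. The account, container,
# and blob names are placeholders. For a .core. host, '/' inside the blob name
# is preserved, and an explicit snapshot argument overrides the query string.
from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient

url = "https://myaccount.blob.core.windows.net/mycontainer/dir/data.bin"
client = BlobClient.from_blob_url(url, credential="<sas-or-account-key>")
print(client.container_name)  # mycontainer
print(client.blob_name)       # dir/data.bin -- directory info kept for .core. hosts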
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            container_name,  # type: str
+            blob_name,  # type: str
+            snapshot=None,  # type: Optional[str]
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> BlobClient
+        """Create BlobClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param container_name: The container name for the blob.
+        :type container_name: str
+        :param blob_name: The name of the blob with which to interact.
+        :type blob_name: str
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :returns: A Blob client.
+        :rtype: ~azure.storage.blob.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string_blob]
+                :end-before: [END auth_from_connection_string_blob]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, container_name=container_name, blob_name=blob_name,
+            snapshot=snapshot, credential=credential, **kwargs
+        )
+
+    @distributed_trace
+    def get_account_information(self, **kwargs):
+        # type: (**Any) -> Dict[str, str]
+        """Gets information related to the storage account in which the blob resides.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+        """
+        try:
+            return self._client.blob.get_account_info(cls=return_response_headers, **kwargs)  # type: ignore
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def _upload_blob_options(  # pylint:disable=too-many-statements
+            self, data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
+            blob_type=BlobType.BlockBlob,  # type: Union[str, BlobType]
+            length=None,  # type: Optional[int]
+            metadata=None,  # type: Optional[Dict[str, str]]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        if self.require_encryption and not self.key_encryption_key:
+            raise ValueError("Encryption required but no key was provided.")
+        encryption_options = {
+            'required': self.require_encryption,
+            'key': self.key_encryption_key,
+            'resolver': self.key_resolver_function,
+        }
+        if self.key_encryption_key is not None:
+            cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key)
+            encryption_options['cek'] = cek
+            encryption_options['vector'] = iv
+            encryption_options['data'] = encryption_data
+
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        if isinstance(data, six.text_type):
+            data = data.encode(encoding)  # type: ignore
+        if length is None:
+            length = get_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        if isinstance(data, bytes):
+            stream = BytesIO(data)
+        elif hasattr(data, 'read'):
+            stream = data
+        elif hasattr(data, '__iter__'):
+            stream = IterStreamer(data, encoding=encoding)
+        else:
+            raise TypeError("Unsupported data type: {}".format(type(data)))
+
+        validate_content = kwargs.pop('validate_content', False)
+        content_settings = kwargs.pop('content_settings', None)
+        overwrite = kwargs.pop('overwrite', False)
+        max_concurrency = kwargs.pop('max_concurrency', 1)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+        kwargs['cpk_info'] = cpk_info
+
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
+        kwargs['modified_access_conditions'] = get_modify_conditions(kwargs)
+        kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs)
+        if content_settings:
+            kwargs['blob_headers'] = BlobHTTPHeaders(
+                blob_cache_control=content_settings.cache_control,
+                blob_content_type=content_settings.content_type,
+                blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+                blob_content_encoding=content_settings.content_encoding,
+                blob_content_language=content_settings.content_language,
+                blob_content_disposition=content_settings.content_disposition
+            )
+        kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None))
+        kwargs['stream'] = stream
+        kwargs['length'] = length
+        kwargs['overwrite'] = overwrite
+        kwargs['headers'] = headers
+        kwargs['validate_content'] = validate_content
+        kwargs['blob_settings'] = self._config
+        kwargs['max_concurrency'] = max_concurrency
+        kwargs['encryption_options'] = encryption_options
+        if blob_type == BlobType.BlockBlob:
+            kwargs['client'] = self._client.block_blob
+            kwargs['data'] = data
+        elif blob_type == BlobType.PageBlob:
+            kwargs['client'] = self._client.page_blob
+        elif blob_type == BlobType.AppendBlob:
+            if self.require_encryption or (self.key_encryption_key is not None):
+                raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+            kwargs['client'] = self._client.append_blob
+        else:
+            raise ValueError("Unsupported BlobType: {}".format(blob_type))
+        return kwargs
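# _upload_blob_options above rejects a customer-provided key (CPK) on non-HTTPS
# schemes, so a sketch of the happy path might look like the following. Written
# as comments and code since it sits inside the _blob_client.py diff; the URL
# and key material are made up, and the CustomerProvidedEncryptionKey
# constructor arguments are assumed from the key_value/key_hash/algorithm
# attributes the code above reads.
import base64
import hashlib
import os

from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient, CustomerProvidedEncryptionKey

raw_key = os.urandom(32)  # AES-256 key supplied by the caller, never stored by the service
cpk = CustomerProvidedEncryptionKey(
    key_value=base64.b64encode(raw_key).decode(),
    key_hash=base64.b64encode(hashlib.sha256(raw_key).digest()).decode(),
)

# Must be an https:// URL, or _upload_blob_options raises ValueError.
client = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/mycontainer/secret.bin?<sas-token>")
client.upload_blob(b"payload", overwrite=True, cpk=cpk)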
ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + kwargs['client'] = self._client.append_blob + else: + raise ValueError("Unsupported BlobType: {}".format(blob_type)) + return kwargs + + @distributed_trace + def upload_blob( # pylint: disable=too-many-locals + self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Any + """Creates a new blob from a data source with automatic chunking. + + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If set overwrite=True, then the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, upload_blob only succeeds if the + blob's lease is active and matches this ID. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START upload_a_blob] + :end-before: [END upload_a_blob] + :language: python + :dedent: 12 + :caption: Upload a blob to the container. 
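For illustration, a minimal sketch of the upload_blob call documented above. The connection string, container, and blob names are placeholders, and the import path assumes the vendored package layout this patch introduces:

    from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient, ContentSettings

    # Hypothetical account details -- substitute real values.
    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="hello.txt")

    # Upload bytes as a block blob, replacing any existing blob and setting
    # HTTP headers and metadata in the same call.
    result = blob.upload_blob(
        b"hello, world",
        overwrite=True,
        content_settings=ContentSettings(content_type="text/plain"),
        metadata={"project": "demo"})
    print(result["etag"], result["last_modified"])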
+ """ + options = self._upload_blob_options( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + **kwargs) + if blob_type == BlobType.BlockBlob: + return upload_block_blob(**options) + if blob_type == BlobType.PageBlob: + return upload_page_blob(**options) + return upload_append_blob(**options) + + def _download_blob_options(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any] + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + if length is not None: + length = offset + length - 1 # Service actually uses an end-range inclusive index + + validate_content = kwargs.pop('validate_content', False) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'clients': self._client, + 'config': self._config, + 'start_range': offset, + 'end_range': length, + 'version_id': kwargs.pop('version_id', None), + 'validate_content': validate_content, + 'encryption_options': { + 'required': self.require_encryption, + 'key': self.key_encryption_key, + 'resolver': self.key_resolver_function}, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'cls': kwargs.pop('cls', None) or deserialize_blob_stream, + 'max_concurrency':kwargs.pop('max_concurrency', 1), + 'encoding': kwargs.pop('encoding', None), + 'timeout': kwargs.pop('timeout', None), + 'name': self.blob_name, + 'container': self.container_name} + options.update(kwargs) + return options + + @distributed_trace + def download_blob(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. + + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
+ :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.blob.StorageStreamDownloader + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START download_a_blob] + :end-before: [END download_a_blob] + :language: python + :dedent: 12 + :caption: Download a blob. 
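A matching download sketch, reusing the hypothetical `blob` client from the upload example above; offset/length select a byte range, and readinto() streams to any writable file object:

    # Read the entire blob into memory.
    data = blob.download_blob().readall()

    # Read a 1 KiB slice starting at byte 512 (offset must be set when length is given).
    chunk = blob.download_blob(offset=512, length=1024).readall()

    # Stream the blob to a local file over two parallel connections.
    with open("hello_copy.txt", "wb") as handle:
        blob.download_blob(max_concurrency=2).readinto(handle)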
+ """ + options = self._download_blob_options( + offset=offset, + length=length, + **kwargs) + return StorageStreamDownloader(**options) + + def _quick_query_options(self, query_expression, + **kwargs): + # type: (str, **Any) -> Dict[str, Any] + delimiter = '\n' + input_format = kwargs.pop('blob_format', None) + if input_format: + try: + delimiter = input_format.lineterminator + except AttributeError: + try: + delimiter = input_format.delimiter + except AttributeError: + raise ValueError("The Type of blob_format can only be DelimitedTextDialect or DelimitedJsonDialect") + output_format = kwargs.pop('output_format', None) + if output_format: + try: + delimiter = output_format.lineterminator + except AttributeError: + try: + delimiter = output_format.delimiter + except AttributeError: + pass + else: + output_format = input_format + query_request = QueryRequest( + expression=query_expression, + input_serialization=serialize_query_format(input_format), + output_serialization=serialize_query_format(output_format) + ) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo( + encryption_key=cpk.key_value, + encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm + ) + options = { + 'query_request': query_request, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'snapshot': self.snapshot, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_headers_and_deserialized, + } + options.update(kwargs) + return options, delimiter + + @distributed_trace + def query_blob(self, query_expression, **kwargs): + # type: (str, **Any) -> BlobQueryReader + """Enables users to select/project on blob/or blob snapshot data by providing simple query expressions. + This operations returns a BlobQueryReader, users need to use readall() or readinto() to get query data. + + :param str query_expression: + Required. a query statement. + :keyword Callable[~azure.storage.blob.BlobQueryError] on_error: + A function to be called on any processing errors returned by the service. + :keyword blob_format: + Optional. Defines the serialization of the data currently stored in the blob. The default is to + treat the blob data as CSV data formatted in the default dialect. This can be overridden with + a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect. + :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect + :keyword output_format: + Optional. Defines the output serialization for the data stream. By default the data will be returned + as it is represented in the blob. By providing an output format, the blob data will be reformatted + according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect. + :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect, ~azure.storage.blob.DelimitedJsonDialect + or list[~azure.storage.blob.ArrowDialect] + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A streaming object (BlobQueryReader) + :rtype: ~azure.storage.blob.BlobQueryReader + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_query.py + :start-after: [START query] + :end-before: [END query] + :language: python + :dedent: 4 + :caption: Select/project on blob or blob snapshot data by providing simple query expressions.
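A minimal query sketch; the CSV dialect settings and the `_2 > 100` column reference are illustrative assumptions about the blob's contents, and `blob` is the hypothetical client from the earlier sketches:

    from azure.multiapi.storagev2.blob.v2020_02_10 import DelimitedTextDialect

    # Describe the blob's current serialization: comma-delimited CSV, no header row.
    dialect = DelimitedTextDialect(
        delimiter=",", quotechar='"', lineterminator="\n", has_header=False)

    # Select rows whose second column exceeds 100; per-record problems are
    # surfaced through the on_error callback rather than raising immediately.
    reader = blob.query_blob(
        "SELECT * FROM BlobStorage WHERE _2 > 100",
        blob_format=dialect,
        on_error=lambda err: print("query error:", err.description))
    filtered = reader.readall()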
+ """ + errors = kwargs.pop("on_error", None) + error_cls = kwargs.pop("error_cls", BlobQueryError) + encoding = kwargs.pop("encoding", None) + options, delimiter = self._quick_query_options(query_expression, **kwargs) + try: + headers, raw_response_body = self._client.blob.query(**options) + except StorageErrorException as error: + process_storage_error(error) + return BlobQueryReader( + name=self.blob_name, + container=self.container_name, + errors=errors, + record_delimiter=delimiter, + encoding=encoding, + headers=headers, + response=raw_response_body, + error_cls=error_cls) + + @staticmethod + def _generic_delete_blob_options(delete_snapshots=False, **kwargs): + # type: (bool, **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if delete_snapshots: + delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) + options = { + 'timeout': kwargs.pop('timeout', None), + 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs + 'delete_snapshots': delete_snapshots or None, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions} + options.update(kwargs) + return options + + def _delete_blob_options(self, delete_snapshots=False, **kwargs): + # type: (bool, **Any) -> Dict[str, Any] + if self.snapshot and delete_snapshots: + raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") + options = self._generic_delete_blob_options(delete_snapshots, **kwargs) + options['snapshot'] = self.snapshot + options['version_id'] = kwargs.pop('version_id', None) + return options + + @distributed_trace + def delete_blob(self, delete_snapshots=False, **kwargs): + # type: (str, **Any) -> None + """Marks the specified blob for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob() + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob + and retains the blob for a specified number of days. + After the specified number of days, the blob's data is removed from the service during garbage collection. + Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` + option. Soft-deleted blob can be restored using :func:`undelete` operation. + + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blobs snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. If specified, delete_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START delete_blob] + :end-before: [END delete_blob] + :language: python + :dedent: 12 + :caption: Delete a blob. + """ + options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) + try: + self._client.blob.delete(**options) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def undelete_blob(self, **kwargs): + # type: (**Any) -> None + """Restores soft-deleted blobs or snapshots. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START undelete_blob] + :end-before: [END undelete_blob] + :language: python + :dedent: 8 + :caption: Undeleting a blob. + """ + try: + self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace() + def exists(self, **kwargs): + # type: (**Any) -> bool + """ + Returns True if a blob exists with the defined parameters, and returns + False otherwise. + + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to check for existence. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: True if the blob exists, False otherwise. + :rtype: bool + """ + try: + self._client.blob.get_properties( + snapshot=self.snapshot, + **kwargs) + return True + except StorageErrorException as error: + try: + process_storage_error(error) + except ResourceNotFoundError: + return False + + @distributed_trace + def get_blob_properties(self, **kwargs): + # type: (**Any) -> BlobProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to get properties. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: BlobProperties + :rtype: ~azure.storage.blob.BlobProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] + :language: python + :dedent: 8 + :caption: Getting the properties for a blob.
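The existence check, property fetch, and soft-delete/undelete round trip compose naturally; a minimal sketch, assuming the hypothetical `blob` client from the earlier examples and a delete retention policy enabled on the account:

    if blob.exists():
        props = blob.get_blob_properties()
        print(props.name, props.size, props.content_settings.content_type)

        # Soft-delete, then restore while still inside the retention window.
        blob.delete_blob()
        blob.undelete_blob()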
+ """ + # TODO: extract this out as _get_blob_properties_options + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + try: + blob_props = self._client.blob.get_properties( + timeout=kwargs.pop('timeout', None), + version_id=kwargs.pop('version_id', None), + snapshot=self.snapshot, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=kwargs.pop('cls', None) or deserialize_blob_properties, + cpk_info=cpk_info, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + blob_props.name = self.blob_name + if isinstance(blob_props, BlobProperties): + blob_props.container = self.container_name + blob_props.snapshot = self.snapshot + return blob_props # type: ignore + + def _set_http_headers_options(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + options = { + 'timeout': kwargs.pop('timeout', None), + 'blob_http_headers': blob_headers, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def set_http_headers(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], **Any) -> None + """Sets system properties on the blob. + + If one property is set for the content_settings, all properties will be overridden. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. 
+ :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Any] + """ + options = self._set_http_headers_options(content_settings=content_settings, **kwargs) + try: + return self._client.blob.set_http_headers(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + def _set_blob_metadata_options(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def set_blob_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Sets user-defined metadata for the blob as one or more name-value pairs. + + :param metadata: + Dict containing name and value pairs. Each call to this operation + replaces all existing metadata attached to the blob. To remove all + metadata from the blob, call this operation with no metadata headers. + :type metadata: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. 
+ :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified) + """ + options = self._set_blob_metadata_options(metadata=metadata, **kwargs) + try: + return self._client.blob.set_metadata(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + def _create_page_blob_options( # type: ignore + self, size, # type: int + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + + sequence_number = kwargs.pop('sequence_number', None) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + if premium_page_blob_tier: + try: + headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore + except AttributeError: + headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore + + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'content_length': 0, + 'blob_content_length': size, + 'blob_sequence_number': sequence_number, + 'blob_http_headers': blob_headers, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 
'blob_tags_string': blob_tags_string, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_page_blob( # type: ignore + self, size, # type: int + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Creates a new Page Blob of the specified size. + + :param int size: + This specifies the maximum size for the page blob, up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword tags: + Name-value pairs associated with the blob as tags. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword int sequence_number: + Only for Page blobs. The sequence number is a user-controlled value that you can use to + track requests. The value of the sequence number must be between 0 + and 2^63 - 1. The default value is 0. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key.
+ :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_page_blob_options( + size, + content_settings=content_settings, + metadata=metadata, + premium_page_blob_tier=premium_page_blob_tier, + **kwargs) + try: + return self._client.page_blob.create(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'content_length': 0, + 'blob_http_headers': blob_headers, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'blob_tags_string': blob_tags_string, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_append_blob(self, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Creates a new Append Blob. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tags. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). 
+ :rtype: dict[str, Any] + """ + options = self._create_append_blob_options( + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return self._client.append_blob.create(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + def _create_snapshot_options(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_snapshot(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Creates a snapshot of the blob. + + A snapshot is a read-only version of a blob that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a blob as it appears at a moment in time. + + A snapshot of a blob has the same name as the base blob from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. 
+ Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START create_blob_snapshot] + :end-before: [END create_blob_snapshot] + :language: python + :dedent: 8 + :caption: Create a snapshot of the blob. + """ + options = self._create_snapshot_options(metadata=metadata, **kwargs) + try: + return self._client.blob.create_snapshot(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): + # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + if 'source_lease' in kwargs: + source_lease = kwargs.pop('source_lease') + try: + headers['x-ms-source-lease-id'] = source_lease.id # type: str + except AttributeError: + headers['x-ms-source-lease-id'] = source_lease + + tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) + + if kwargs.get('requires_sync'): + headers['x-ms-requires-sync'] = str(kwargs.pop('requires_sync')) + + timeout = kwargs.pop('timeout', None) + dest_mod_conditions = get_modify_conditions(kwargs) + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'copy_source': source_url, + 'seal_blob': kwargs.pop('seal_destination_blob', None), + 'timeout': timeout, + 'modified_access_conditions': dest_mod_conditions, + 'blob_tags_string': blob_tags_string, + 'headers': headers, + 'cls': return_response_headers, + } + if not incremental_copy: + source_mod_conditions = get_source_conditions(kwargs) + dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) + options['source_modified_access_conditions'] = source_mod_conditions + options['lease_access_conditions'] = dest_access_conditions + options['tier'] = tier.value if tier else None + options.update(kwargs) + return options + + @distributed_trace + def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): + # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]] + """Copies a blob asynchronously. + + This operation returns a dictionary of copy properties that can be used + to check the status of, or abort, the copy operation. + The Blob service copies blobs on a best-effort basis. + + The source blob for a copy operation may be a block blob, an append blob, + or a page blob. If the destination blob already exists, it must be of the + same blob type as the source blob. Any existing destination blob will be + overwritten. The destination blob cannot be modified while a copy operation + is in progress. + + When copying from a page blob, the Blob service creates a destination page + blob of the source blob's length, initially containing all zeroes. Then + the source page ranges are enumerated, and non-empty ranges are copied. + + For a block blob or an append blob, the Blob service creates a committed + blob of zero length before returning from this operation. When copying + from a block blob, all committed blocks and their block IDs are copied. + Uncommitted blocks are not copied. At the end of the copy operation, the + destination blob will have the same committed block count as the source. + + When copying from an append blob, all committed blocks are copied. At the + end of the copy operation, the destination blob will have the same committed + block count as the source. + + For all blob types, the copy_status key of the returned dictionary reports + whether the copy completed synchronously or is still pending; a pending copy + can be cancelled with abort_copy(). The final blob will be committed when the + copy completes. + + :param str source_url: + A URL of up to 2 KB in length that specifies a file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :param metadata: + Name-value pairs associated with the blob as metadata. If no name-value + pairs are specified, the operation will copy the metadata from the + source blob or file to the destination blob. If one or more name-value + pairs are specified, the destination blob is created with the specified + metadata, and metadata is not copied from the source blob or file. + :type metadata: dict(str, str) + :param bool incremental_copy: + Copies the snapshot of the source page blob to a destination page blob. + The snapshot is copied such that only the differential changes between + the previously copied snapshot and the current snapshot are transferred + to the destination. + The copied snapshots are complete copies of the original snapshot and + can be read or copied from as usual. Defaults to False. + :keyword tags: + Name-value pairs associated with the blob as tags. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source + blob has been modified since the specified date/time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source blob + has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has been modified since the specified date/time. + If the destination blob has not been modified, the Blob service returns + status code 412 (Precondition Failed). + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has not been modified since the specified + date/time. If the destination blob has been modified, the Blob service + returns status code 412 (Precondition Failed). + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword source_lease: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword bool seal_destination_blob: + Seal the destination append blob. This operation is only for append blob. + + .. versionadded:: 12.4.0 + + :keyword bool requires_sync: + Enforces that the service will not return a response until the copy is complete. + :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). 
+ :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START copy_blob_from_url] + :end-before: [END copy_blob_from_url] + :language: python + :dedent: 12 + :caption: Copy a blob from a URL. + """ + options = self._start_copy_from_url_options( + source_url=self._encode_source_url(source_url), + metadata=metadata, + incremental_copy=incremental_copy, + **kwargs) + try: + if incremental_copy: + return self._client.page_blob.copy_incremental(**options) + return self._client.blob.start_copy_from_url(**options) + except StorageErrorException as error: + process_storage_error(error) + + def _abort_copy_options(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + try: + copy_id = copy_id.copy.id + except AttributeError: + try: + copy_id = copy_id['copy_id'] + except TypeError: + pass + options = { + 'copy_id': copy_id, + 'lease_access_conditions': access_conditions, + 'timeout': kwargs.pop('timeout', None)} + options.update(kwargs) + return options + + @distributed_trace + def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination blob with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID string, or an + instance of BlobProperties. + :type copy_id: str or ~azure.storage.blob.BlobProperties + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START abort_copy_blob_from_url] + :end-before: [END abort_copy_blob_from_url] + :language: python + :dedent: 12 + :caption: Abort copying a blob from URL. + """ + options = self._abort_copy_options(copy_id, **kwargs) + try: + self._client.blob.abort_copy_from_url(**options) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): + # type: (int, Optional[str], **Any) -> BlobLeaseClient + """Requests a new lease. + + If the blob does not have an active lease, the Blob + Service creates a lease on the blob and returns a new lease. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
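A minimal usage sketch for the copy and abort operations above (an editorial addition, not part of the patch; the connection string, container, and blob names are placeholders, and the equivalent public ``azure.storage.blob`` surface is assumed)::

    from azure.storage.blob import BlobClient

    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="dest-blob")
    # start_copy_from_url returns a dict of copy properties, including copy_id/copy_status.
    props = dest.start_copy_from_url(
        "https://myaccount.blob.core.windows.net/mycontainer/source-blob")
    if props["copy_status"] == "pending":
        dest.abort_copy(props["copy_id"])  # leaves a zero-length destination blob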
Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A BlobLeaseClient object.
+ :rtype: ~azure.storage.blob.BlobLeaseClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common.py
+ :start-after: [START acquire_lease_on_blob]
+ :end-before: [END acquire_lease_on_blob]
+ :language: python
+ :dedent: 8
+ :caption: Acquiring a lease on a blob.
+ """
+ lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
+ lease.acquire(lease_duration=lease_duration, **kwargs)
+ return lease
+
+ @distributed_trace
+ def set_standard_blob_tier(self, standard_blob_tier, **kwargs):
+ # type: (Union[str, StandardBlobTier], Any) -> None
+ """This operation sets the tier on a block blob.
+
+ A block blob's tier determines Hot/Cool/Archive storage type.
+ This operation does not update the blob's ETag.
+
+ :param standard_blob_tier:
+ Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
+ 'Archive'. The hot tier is optimized for storing data that is accessed
+ frequently. The cool storage tier is optimized for storing data that
+ is infrequently accessed and stored for at least a month. The archive
+ tier is optimized for storing data that is rarely accessed and stored
+ for at least six months with flexible latency requirements.
+ :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+ :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+ Indicates the priority with which to rehydrate an archived blob.
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob on which to set the tier.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
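A short sketch of the lease workflow just defined, guarding the tier change documented below (not part of the patch; placeholder names, reusing the ``BlobClient`` import from the previous sketch)::

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")
    lease = blob.acquire_lease(lease_duration=15)  # 15-60 seconds, or -1 for infinite
    try:
        blob.set_standard_blob_tier("Cool", lease=lease)
    finally:
        lease.release()  # release explicitly rather than waiting for expiry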
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if standard_blob_tier is None: + raise ValueError("A StandardBlobTier must be specified") + if self.snapshot and kwargs.get('version_id'): + raise ValueError("Snapshot and version_id cannot be set at the same time") + try: + self._client.blob.set_tier( + tier=standard_blob_tier, + snapshot=self.snapshot, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + lease_access_conditions=access_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + def _stage_block_options( + self, block_id, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + block_id = encode_base64(str(block_id)) + if isinstance(data, six.text_type): + data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + if length is None: + length = get_length(data) + if length is None: + length, data = read_length(data) + if isinstance(data, bytes): + data = data[:length] + + validate_content = kwargs.pop('validate_content', False) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'block_id': block_id, + 'content_length': length, + 'body': data, + 'transactional_content_md5': None, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + } + options.update(kwargs) + return options + + @distributed_trace + def stage_block( + self, block_id, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Creates a new block to be committed as part of a blob. + + :param str block_id: A valid Base64 string value that identifies the + block. Prior to encoding, the string must be less than or equal to 64 + bytes in size. For a given blob, the length of the value specified for + the block_id parameter must be the same size for each block. + :param data: The blob data. + :param int length: Size of the block. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. 
Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str encoding: + Defaults to UTF-8. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob property dict. + :rtype: dict[str, Any] + """ + options = self._stage_block_options( + block_id, + data, + length=length, + **kwargs) + try: + return self._client.block_blob.stage_block(**options) + except StorageErrorException as error: + process_storage_error(error) + + def _stage_block_from_url_options( + self, block_id, # type: str + source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + source_content_md5=None, # type: Optional[Union[bytes, bytearray]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + if source_length is not None and source_offset is None: + raise ValueError("Source offset value must not be None if length is set.") + if source_length is not None: + source_length = source_offset + source_length - 1 + block_id = encode_base64(str(block_id)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + range_header = None + if source_offset is not None: + range_header, _ = validate_and_format_range_headers(source_offset, source_length) + + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'block_id': block_id, + 'content_length': 0, + 'source_url': source_url, + 'source_range': range_header, + 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + } + options.update(kwargs) + return options + + @distributed_trace + def stage_block_from_url( + self, block_id, # type: str + source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + source_content_md5=None, # type: Optional[Union[bytes, bytearray]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Creates a new block to be committed as part of a blob where + the contents are read from a URL. + + :param str block_id: A valid Base64 string value that identifies the + block. Prior to encoding, the string must be less than or equal to 64 + bytes in size. For a given blob, the length of the value specified for + the block_id parameter must be the same size for each block. + :param str source_url: The URL. 
+ :param int source_offset: + Start of byte range to use for the block. + Must be set if source length is provided. + :param int source_length: The size of the block in bytes. + :param bytearray source_content_md5: + Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob property dict. + :rtype: dict[str, Any] + """ + options = self._stage_block_from_url_options( + block_id, + source_url=self._encode_source_url(source_url), + source_offset=source_offset, + source_length=source_length, + source_content_md5=source_content_md5, + **kwargs) + try: + return self._client.block_blob.stage_block_from_url(**options) + except StorageErrorException as error: + process_storage_error(error) + + def _get_block_list_result(self, blocks): + # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] + committed = [] # type: List + uncommitted = [] # type: List + if blocks.committed_blocks: + committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access + if blocks.uncommitted_blocks: + uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access + return committed, uncommitted + + @distributed_trace + def get_block_list(self, block_list_type="committed", **kwargs): + # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] + """The Get Block List operation retrieves the list of blocks that have + been uploaded as part of a block blob. + + :param str block_list_type: + Specifies whether to return the list of committed + blocks, the list of uncommitted blocks, or both lists together. + Possible values include: 'committed', 'uncommitted', 'all' + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. 
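A hedged sketch of staging a block directly from a remote URL using the options above (``blob`` as in the lease sketch; the ``<sas>`` token is a hypothetical placeholder)::

    blob.stage_block_from_url(
        block_id="0001",
        source_url="https://myaccount.blob.core.windows.net/src/big-blob?<sas>",
        source_offset=0,
        source_length=4 * 1024 * 1024,  # stage a 4 MiB slice of the source
    )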
+ :returns: A tuple of two lists - committed and uncommitted blocks + :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + try: + blocks = self._client.block_blob.get_block_list( + list_type=block_list_type, + snapshot=self.snapshot, + timeout=kwargs.pop('timeout', None), + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return self._get_block_list_result(blocks) + + def _commit_block_list_options( # type: ignore + self, block_list, # type: List[BlobBlock] + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) + for block in block_list: + try: + if block.state.value == 'committed': + block_lookup.committed.append(encode_base64(str(block.id))) + elif block.state.value == 'uncommitted': + block_lookup.uncommitted.append(encode_base64(str(block.id))) + else: + block_lookup.latest.append(encode_base64(str(block.id))) + except AttributeError: + block_lookup.latest.append(encode_base64(str(block))) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + blob_headers = None + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + + validate_content = kwargs.pop('validate_content', False) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + tier = kwargs.pop('standard_blob_tier', None) + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'blocks': block_lookup, + 'blob_http_headers': blob_headers, + 'lease_access_conditions': access_conditions, + 'timeout': kwargs.pop('timeout', None), + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'tier': tier.value if tier else None, + 'blob_tags_string': blob_tags_string, + 'headers': headers + } + options.update(kwargs) + return options + + @distributed_trace + def commit_block_list( # type: ignore + self, block_list, # type: List[BlobBlock] + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) 
-> Dict[str, Union[str, datetime]]
+ """The Commit Block List operation writes a blob by specifying the list of
+ block IDs that make up the blob.
+
+ :param list block_list:
+ List of BlobBlock objects representing the blocks to commit.
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict[str, str]
+ :keyword tags:
+ Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the block list content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on destination blob with a matching value.
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name.
If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._commit_block_list_options( + block_list, + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return self._client.block_blob.commit_block_list(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): + # type: (Union[str, PremiumPageBlobTier], **Any) -> None + """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. + + :param premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if premium_page_blob_tier is None: + raise ValueError("A PremiumPageBlobTier must be specified") + try: + self._client.blob.set_tier( + tier=premium_page_blob_tier, + timeout=kwargs.pop('timeout', None), + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + def _set_blob_tags_options(self, tags=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + tags = serialize_blob_tags(tags) + mod_conditions = get_modify_conditions(kwargs) + + options = { + 'tags': tags, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def set_blob_tags(self, tags=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. + Each call to this operation replaces all existing tags attached to the blob. To remove all + tags from the blob, call this operation with no tags set. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :param tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. 
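Putting the block APIs together, a sketch of the stage/commit cycle plus the tagging call documented here (not part of the patch; placeholder data, ``blob`` as in the earlier sketches; block IDs for one blob must all have the same length)::

    from azure.storage.blob import BlobBlock

    blob.stage_block(block_id="0001", data=b"a" * 1024)
    blob.stage_block(block_id="0002", data=b"b" * 1024)
    committed, uncommitted = blob.get_block_list("all")  # two lists of BlobBlock
    blob.commit_block_list([BlobBlock(block_id="0001"), BlobBlock(block_id="0002")])
    blob.set_blob_tags({"project": "alpha"})  # replaces any existing tag set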
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+ :type tags: dict(str, str)
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to add tags to.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the tags content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on destination blob with a matching value.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
+ """
+ options = self._set_blob_tags_options(tags=tags, **kwargs)
+ try:
+ return self._client.blob.set_tags(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _get_blob_tags_options(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ mod_conditions = get_modify_conditions(kwargs)
+
+ options = {
+ 'version_id': kwargs.pop('version_id', None),
+ 'snapshot': self.snapshot,
+ 'modified_access_conditions': mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'cls': return_headers_and_deserialized}
+ return options
+
+ @distributed_trace
+ def get_blob_tags(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to retrieve tags from.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on destination blob with a matching value.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Key value pairs of blob tags.
+ :rtype: Dict[str, str]
+ """
+ options = self._get_blob_tags_options(**kwargs)
+ try:
+ _, tags = self._client.blob.get_tags(**options)
+ return parse_tags(tags) # pylint: disable=protected-access
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _get_page_ranges_options( # type: ignore
+ self, offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]]
+ **kwargs
+ ):
+ # type: (...)
-> Dict[str, Any]
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if length is not None and offset is None:
+ raise ValueError("Offset value must not be None if length is set.")
+ if length is not None:
+ length = offset + length - 1 # Reformat to an inclusive range index
+ page_range, _ = validate_and_format_range_headers(
+ offset, length, start_range_required=False, end_range_required=False, align_to_page=True
+ )
+ options = {
+ 'snapshot': self.snapshot,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'range': page_range}
+ if previous_snapshot_diff:
+ try:
+ options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore
+ except AttributeError:
+ try:
+ options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore
+ except TypeError:
+ options['prevsnapshot'] = previous_snapshot_diff
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def get_page_ranges( # type: ignore
+ self, offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a Page Blob or snapshot
+ of a page blob.
+
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param str previous_snapshot_diff:
+ The snapshot diff parameter that contains an opaque DateTime value that
+ specifies a previous blob snapshot to be compared
+ against a more recent snapshot or the current blob.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges, the second is the list of cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ previous_snapshot_diff=previous_snapshot_diff,
+ **kwargs)
+ try:
+ if previous_snapshot_diff:
+ ranges = self._client.page_blob.get_page_ranges_diff(**options)
+ else:
+ ranges = self._client.page_blob.get_page_ranges(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ @distributed_trace
+ def get_page_range_diff_for_managed_disk(
+ self, previous_snapshot_url, # type: str
+ offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a managed disk or snapshot.
+
+ .. note::
+ This operation is only available for managed disk accounts.
+
+ .. versionadded:: 12.2.0
+ This operation was introduced in API version '2019-07-07'.
+
+ :param str previous_snapshot_url:
+ Specifies the URL of a previous snapshot of the managed disk.
+ The response will only contain pages that were changed between the target blob and
+ its previous snapshot.
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
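A sketch of reading page ranges with the method just defined (not part of the patch; ``page_blob`` is assumed to be a ``BlobClient`` for an existing page blob, and offsets/lengths must stay 512-byte aligned)::

    page_blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="disk.vhd")
    filled, cleared = page_blob.get_page_ranges(offset=0, length=512 * 1024)
    for page_range in filled:
        print(page_range["start"], page_range["end"])  # inclusive byte offsets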
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges, the second is the list of cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ prev_snapshot_url=previous_snapshot_url,
+ **kwargs)
+ try:
+ ranges = self._client.page_blob.get_page_ranges_diff(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs):
+ # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any]
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if sequence_number_action is None:
+ raise ValueError("A sequence number action must be specified")
+ options = {
+ 'sequence_number_action': sequence_number_action,
+ 'timeout': kwargs.pop('timeout', None),
+ 'blob_sequence_number': sequence_number,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs):
+ # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]]
+ """Sets the blob sequence number.
+
+ :param str sequence_number_action:
+ This property indicates how the service should modify the blob's sequence
+ number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+ :param str sequence_number:
+ This property sets the blob's sequence number. The sequence number is a
+ user-controlled property that you can use to track requests and manage
+ concurrency issues.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._set_sequence_number_options( + sequence_number_action, sequence_number=sequence_number, **kwargs) + try: + return self._client.page_blob.update_sequence_number(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + def _resize_blob_options(self, size, **kwargs): + # type: (int, **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if size is None: + raise ValueError("A content length must be specified for a Page Blob.") + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'blob_content_length': size, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def resize_blob(self, size, **kwargs): + # type: (int, **Any) -> Dict[str, Union[str, datetime]] + """Resizes a page blob to the specified size. + + If the specified value is less than the current size of the blob, + then all pages above the specified value are cleared. + + :param int size: + Size used to resize blob. Maximum size for a page blob is up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword int timeout: + The timeout parameter is expressed in seconds. 
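A sketch of the sequence-number and resize calls documented here (not part of the patch; ``page_blob`` as in the page-ranges sketch, and the action string follows the ``SequenceNumberAction`` values)::

    page_blob.set_sequence_number("update", sequence_number="7")
    page_blob.resize_blob(512 * 1024)  # new size must stay 512-byte aligned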
+ :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._resize_blob_options(size, **kwargs) + try: + return self._client.page_blob.resize(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + def _upload_page_options( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + if isinstance(page, six.text_type): + page = page.encode(kwargs.pop('encoding', 'UTF-8')) + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns with 512 page size") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns with 512 page size") + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + seq_conditions = SequenceNumberAccessConditions( + if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), + if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), + if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) + ) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + validate_content = kwargs.pop('validate_content', False) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'body': page[:length], + 'content_length': length, + 'transactional_content_md5': None, + 'timeout': kwargs.pop('timeout', None), + 'range': content_range, + 'lease_access_conditions': access_conditions, + 'sequence_number_access_conditions': seq_conditions, + 'modified_access_conditions': mod_conditions, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def upload_page( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """The Upload Pages operation writes a range of pages to a page blob. + + :param bytes page: + Content of the page. + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. 
This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._upload_page_options( + page=page, + offset=offset, + length=length, + **kwargs) + try: + return self._client.page_blob.upload_pages(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + def _upload_pages_from_url_options( # type: ignore + self, source_url, # type: str + offset, # type: int + length, # type: int + source_offset, # type: int + **kwargs + ): + # type: (...) 
-> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+ # TODO: extract the code to a method format_range
+ if offset is None or offset % 512 != 0:
+ raise ValueError("offset must be an integer that aligns with 512 page size")
+ if length is None or length % 512 != 0:
+ raise ValueError("length must be an integer that aligns with 512 page size")
+ if source_offset is None or source_offset % 512 != 0:
+ raise ValueError("source_offset must be an integer that aligns with 512 page size")
+
+ # Format range
+ end_range = offset + length - 1
+ destination_range = 'bytes={0}-{1}'.format(offset, end_range)
+ source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) # inclusive end of the source range
+
+ seq_conditions = SequenceNumberAccessConditions(
+ if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+ if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+ if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+ )
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ source_mod_conditions = get_source_conditions(kwargs)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ source_content_md5 = kwargs.pop('source_content_md5', None)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+
+ options = {
+ 'source_url': source_url,
+ 'content_length': 0,
+ 'source_range': source_range,
+ 'range': destination_range,
+ 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
+ 'timeout': kwargs.pop('timeout', None),
+ 'lease_access_conditions': access_conditions,
+ 'sequence_number_access_conditions': seq_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'source_modified_access_conditions': source_mod_conditions,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def upload_pages_from_url(self, source_url, # type: str
+ offset, # type: int
+ length, # type: int
+ source_offset, # type: int
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ """
+ The Upload Pages operation writes a range of pages to a page blob where
+ the contents are read from a URL.
+
+ :param str source_url:
+ The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+ shared access signature attached.
+ :param int offset:
+ Start of byte range to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param int length:
+ Number of bytes to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param int source_offset:
+ This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+ The service will read the same number of bytes as the destination range (length bytes).
+ :keyword bytes source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. 
+        """
+        options = self._upload_pages_from_url_options(
+            source_url=self._encode_source_url(source_url),
+            offset=offset,
+            length=length,
+            source_offset=source_offset,
+            **kwargs
+        )
+        try:
+            return self._client.page_blob.upload_pages_from_url(**options)  # type: ignore
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def _clear_page_options(self, offset, length, **kwargs):
+        # type: (int, int, **Any) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        seq_conditions = SequenceNumberAccessConditions(
+            if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+            if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+            if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+        )
+        mod_conditions = get_modify_conditions(kwargs)
+        if offset is None or offset % 512 != 0:
+            raise ValueError("offset must be an integer that aligns with 512 page size")
+        if length is None or length % 512 != 0:
+            raise ValueError("length must be an integer that aligns with 512 page size")
+        end_range = length + offset - 1  # Reformat to an inclusive range index
+        content_range = 'bytes={0}-{1}'.format(offset, end_range)
+
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+
+        options = {
+            'content_length': 0,
+            'timeout': kwargs.pop('timeout', None),
+            'range': content_range,
+            'lease_access_conditions': access_conditions,
+            'sequence_number_access_conditions': seq_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cpk_info': cpk_info,
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def clear_page(self, offset, length, **kwargs):
+        # type: (int, int, **Any) -> Dict[str, Union[str, datetime]]
+        """Clears a range of pages.
+
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of 512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of 512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int if_sequence_number_lte:
+            If the blob's sequence number is less than or equal to
+            the specified value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_lt:
+            If the blob's sequence number is less than the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_eq:
+            If the blob's sequence number is equal to the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. "\"tagname\"='my tag'"
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
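+
+        .. admonition:: Example:
+
+            A minimal sketch (an editor's illustration, not taken from the package samples);
+            it assumes an existing page blob client ``blob_client``::
+
+                # Zero out the first 4096 bytes (eight 512-byte pages) of the blob.
+                blob_client.clear_page(offset=0, length=4096)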
+        """
+        options = self._clear_page_options(offset, length, **kwargs)
+        try:
+            return self._client.page_blob.clear_pages(**options)  # type: ignore
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def _append_block_options(  # type: ignore
+            self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+            length=None,  # type: Optional[int]
+            **kwargs
+    ):
+        # type: (...) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+        if isinstance(data, six.text_type):
+            data = data.encode(kwargs.pop('encoding', 'UTF-8'))  # type: ignore
+        if length is None:
+            length = get_length(data)
+            if length is None:
+                length, data = read_length(data)
+        if length == 0:
+            return {}
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        appendpos_condition = kwargs.pop('appendpos_condition', None)
+        maxsize_condition = kwargs.pop('maxsize_condition', None)
+        validate_content = kwargs.pop('validate_content', False)
+        append_conditions = None
+        if maxsize_condition or (appendpos_condition is not None):
+            append_conditions = AppendPositionAccessConditions(
+                max_size=maxsize_condition,
+                append_position=appendpos_condition
+            )
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        cpk_scope_info = get_cpk_scope_info(kwargs)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+        options = {
+            'body': data,
+            'content_length': length,
+            'timeout': kwargs.pop('timeout', None),
+            'transactional_content_md5': None,
+            'lease_access_conditions': access_conditions,
+            'append_position_access_conditions': append_conditions,
+            'modified_access_conditions': mod_conditions,
+            'validate_content': validate_content,
+            'cpk_scope_info': cpk_scope_info,
+            'cpk_info': cpk_info,
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def append_block(  # type: ignore
+            self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+            length=None,  # type: Optional[int]
+            **kwargs
+    ):
+        # type: (...) -> Dict[str, Union[str, datetime, int]]
+        """Commits a new block of data to the end of the existing append blob.
+
+        :param data:
+            Content of the block. This can be bytes, text, an iterable or a file-like object.
+        :type data: bytes or str or Iterable
+        :param int length:
+            Size of the block in bytes.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the block content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. "\"tagname\"='my tag'"
+
+            .. versionadded:: 12.4.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
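+
+        .. admonition:: Example:
+
+            A minimal sketch (an editor's illustration, not taken from the package samples);
+            it assumes an existing append blob client ``blob_client`` whose blob was created
+            with ``create_append_blob()``::
+
+                resp = blob_client.append_block(b'hello, world')
+                print(resp['blob_committed_block_count'])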
+        """
+        options = self._append_block_options(
+            data,
+            length=length,
+            **kwargs
+        )
+        try:
+            return self._client.append_blob.append_block(**options)  # type: ignore
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def _append_block_from_url_options(  # type: ignore
+            self, copy_source_url,  # type: str
+            source_offset=None,  # type: Optional[int]
+            source_length=None,  # type: Optional[int]
+            **kwargs
+    ):
+        # type: (...) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+        # If the source length is provided, the source offset must be provided too
+        if source_length is not None and source_offset is None:
+            raise ValueError("source_offset should also be specified if source_length is specified")
+        # Format based on whether length is present
+        source_range = None
+        if source_length is not None:
+            end_range = source_offset + source_length - 1
+            source_range = 'bytes={0}-{1}'.format(source_offset, end_range)
+        elif source_offset is not None:
+            source_range = "bytes={0}-".format(source_offset)
+
+        appendpos_condition = kwargs.pop('appendpos_condition', None)
+        maxsize_condition = kwargs.pop('maxsize_condition', None)
+        source_content_md5 = kwargs.pop('source_content_md5', None)
+        append_conditions = None
+        if maxsize_condition or (appendpos_condition is not None):
+            append_conditions = AppendPositionAccessConditions(
+                max_size=maxsize_condition,
+                append_position=appendpos_condition
+            )
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        source_mod_conditions = get_source_conditions(kwargs)
+        cpk_scope_info = get_cpk_scope_info(kwargs)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+
+        options = {
+            'source_url': copy_source_url,
+            'content_length': 0,
+            'source_range': source_range,
+            'source_content_md5': source_content_md5,
+            'transactional_content_md5': None,
+            'lease_access_conditions': access_conditions,
+            'append_position_access_conditions': append_conditions,
+            'modified_access_conditions': mod_conditions,
+            'source_modified_access_conditions': source_mod_conditions,
+            'cpk_scope_info': cpk_scope_info,
+            'cpk_info': cpk_info,
+            'cls': return_response_headers,
+            'timeout': kwargs.pop('timeout', None)}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def append_block_from_url(self, copy_source_url,  # type: str
+                              source_offset=None,  # type: Optional[int]
+                              source_length=None,  # type: Optional[int]
+                              **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime, int]]
+        """
+        Creates a new block to be committed as part of a blob, where the contents are read from a source url.
+
+        :param str copy_source_url:
+            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+            shared access signature attached.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+        :param int source_length:
+            This indicates the number of bytes to be read from the copy source, starting at source_offset.
+        :keyword bytearray source_content_md5:
+            If given, the service will calculate the MD5 hash of the block content and compare against this value.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+ :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the + AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. 
If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + """ + options = self._append_block_from_url_options( + copy_source_url=self._encode_source_url(copy_source_url), + source_offset=source_offset, + source_length=source_length, + **kwargs + ) + try: + return self._client.append_blob.append_block_from_url(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + def _seal_append_blob_options(self, **kwargs): + # type: (...) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + appendpos_condition = kwargs.pop('appendpos_condition', None) + append_conditions = None + if appendpos_condition is not None: + append_conditions = AppendPositionAccessConditions( + append_position=appendpos_condition + ) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'append_position_access_conditions': append_conditions, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def seal_append_blob(self, **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """The Seal operation seals the Append Blob to make it read-only. + + .. versionadded:: 12.4.0 + + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
+ :rtype: dict(str, Any) + """ + options = self._seal_append_blob_options(**kwargs) + try: + return self._client.append_blob.seal(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2020_02_10/_blob_service_client.py new file mode 100644 index 0000000..f68a680 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_blob_service_client.py @@ -0,0 +1,686 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, + TYPE_CHECKING +) + +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse # type: ignore + +from azure.core.paging import ItemPaged +from azure.core.pipeline import Pipeline +from azure.core.tracing.decorator import distributed_trace + +from ._shared.models import LocationMode +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.parser import _to_utc_datetime +from ._shared.response_handlers import return_response_headers, process_storage_error, \ + parse_to_internal_user_delegation_key +from ._generated import AzureBlobStorage, VERSION +from ._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo +from ._container_client import ContainerClient +from ._blob_client import BlobClient +from ._models import ContainerPropertiesPaged, FilteredBlobPaged +from ._serialize import get_api_version +from ._deserialize import service_stats_deserialize, service_properties_deserialize + +if TYPE_CHECKING: + from datetime import datetime + from azure.core.pipeline.transport import HttpTransport + from azure.core.pipeline.policies import HTTPPolicy + from ._shared.models import UserDelegationKey + from ._lease import BlobLeaseClient + from ._models import ( + ContainerProperties, + BlobProperties, + PublicAccess, + BlobAnalyticsLogging, + Metrics, + CorsRule, + RetentionPolicy, + StaticWebsite, + ) + + +class BlobServiceClient(StorageAccountHostsMixin): + """A client to interact with the Blob Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete containers within the account. + For operations relating to a specific container or blob, clients for those entities + can also be retrieved using the `get_client` functions. + + :param str account_url: + The URL to the blob storage account. Any other entities included + in the URL path (e.g. container or blob) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. 
+ Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_blob_service_client] + :end-before: [END create_blob_service_client] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient with account url and credential. + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_blob_service_client_oauth] + :end-before: [END create_blob_service_client_oauth] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient with Azure Identity credentials. + """ + + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + _, sas_token = parse_query(parsed_url.query) + self._query_str, credential = self._format_query_string(sas_token, credential) + super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) + self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + return "{}://{}/{}".format(self.scheme, hostname, self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> BlobServiceClient + """Create BlobServiceClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. 
The value can be a SAS token string, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. + Credentials provided here will take precedence over those in the connection string. + :returns: A Blob service client. + :rtype: ~azure.storage.blob.BlobServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START auth_from_connection_string] + :end-before: [END auth_from_connection_string] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient from a connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls(account_url, credential=credential, **kwargs) + + @distributed_trace + def get_user_delegation_key(self, key_start_time, # type: datetime + key_expiry_time, # type: datetime + **kwargs # type: Any + ): + # type: (...) -> UserDelegationKey + """ + Obtain a user delegation key for the purpose of signing SAS tokens. + A token credential must be present on the service object for this request to succeed. + + :param ~datetime.datetime key_start_time: + A DateTime value. Indicates when the key becomes valid. + :param ~datetime.datetime key_expiry_time: + A DateTime value. Indicates when the key stops being valid. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The user delegation key. + :rtype: ~azure.storage.blob.UserDelegationKey + """ + key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) + timeout = kwargs.pop('timeout', None) + try: + user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info, + timeout=timeout, + **kwargs) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore + + @distributed_trace + def get_account_information(self, **kwargs): + # type: (Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START get_blob_service_account_info] + :end-before: [END get_blob_service_account_info] + :language: python + :dedent: 8 + :caption: Getting account information for the blob service. + """ + try: + return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def get_service_stats(self, **kwargs): + # type: (**Any) -> Dict[str, Any] + """Retrieves statistics related to replication for the Blob service. + + It is only available when read-access geo-redundant replication is enabled for + the storage account. + + With geo-redundant replication, Azure Storage maintains your data durable + in two locations. In both locations, Azure Storage constantly maintains + multiple healthy replicas of your data. The location where you read, + create, update, or delete data is the primary storage account location. 
+        The primary location exists in the region you choose at the time you
+        create an account via the Azure portal, for
+        example, North Central US. The location to which your data is replicated
+        is the secondary location. The secondary location is automatically
+        determined based on the location of the primary; it is in a second data
+        center that resides in the same region as the primary location. Read-only
+        access is available from the secondary location, if read-access geo-redundant
+        replication is enabled for your storage account.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: The blob service stats.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_stats]
+                :end-before: [END get_blob_service_stats]
+                :language: python
+                :dedent: 8
+                :caption: Getting service stats for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = self._client.service.get_statistics(  # type: ignore
+                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
+            return service_stats_deserialize(stats)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_service_properties(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An object containing blob service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_properties]
+                :end-before: [END get_blob_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Getting service properties for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            service_props = self._client.service.get_properties(timeout=timeout, **kwargs)
+            return service_properties_deserialize(service_props)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_service_properties(
+            self, analytics_logging=None,  # type: Optional[BlobAnalyticsLogging]
+            hour_metrics=None,  # type: Optional[Metrics]
+            minute_metrics=None,  # type: Optional[Metrics]
+            cors=None,  # type: Optional[List[CorsRule]]
+            target_version=None,  # type: Optional[str]
+            delete_retention_policy=None,  # type: Optional[RetentionPolicy]
+            static_website=None,  # type: Optional[StaticWebsite]
+            **kwargs
+    ):
+        # type: (...) -> None
+        """Sets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        If an element (e.g. analytics_logging) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :param analytics_logging:
+            Groups the Azure Analytics Logging settings.
+        :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
+        :param hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates for blobs.
+        :type hour_metrics: ~azure.storage.blob.Metrics
+        :param minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute for blobs.
+        :type minute_metrics: ~azure.storage.blob.Metrics
+        :param cors:
+            You can include up to five CorsRule elements in the
+            list.
+            If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.blob.CorsRule]
+        :param str target_version:
+            Indicates the default version to use for requests if an incoming
+            request's version is not specified.
+        :param delete_retention_policy:
+            The delete retention policy specifies whether to retain deleted blobs.
+            It also specifies the number of days and versions of blob to keep.
+        :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
+        :param static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :type static_website: ~azure.storage.blob.StaticWebsite
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START set_blob_service_properties]
+                :end-before: [END set_blob_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Setting service properties for the blob service.
+        """
+        props = StorageServiceProperties(
+            logging=analytics_logging,
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=cors,
+            default_service_version=target_version,
+            delete_retention_policy=delete_retention_policy,
+            static_website=static_website
+        )
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.service.set_properties(props, timeout=timeout, **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_containers(
+            self, name_starts_with=None,  # type: Optional[str]
+            include_metadata=False,  # type: Optional[bool]
+            **kwargs
+    ):
+        # type: (...) -> ItemPaged[ContainerProperties]
+        """Returns a generator to list the containers under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all containers have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only containers whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that container metadata be returned in the response.
+            The default value is `False`.
+        :keyword bool include_deleted:
+            Specifies that deleted containers be returned in the response.
+            This is only for accounts enabled for container restore. The default value is `False`.
+
+            .. versionadded:: 12.4.0
+
+        :keyword int results_per_page:
+            The maximum number of container names to retrieve per API
+            call. If the request does not specify a value, the server will return up to 5,000 items.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) of ContainerProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_list_containers]
+                :end-before: [END bsc_list_containers]
+                :language: python
+                :dedent: 12
+                :caption: Listing the containers in the blob service.
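+
+        .. admonition:: Example:
+
+            A minimal sketch (an editor's illustration, not taken from the package samples);
+            it assumes an authenticated ``blob_service_client``::
+
+                for container in blob_service_client.list_containers(name_starts_with='logs-'):
+                    print(container.name)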
+ """ + include = ['metadata'] if include_metadata else [] + include_deleted = kwargs.pop('include_deleted', None) + if include_deleted: + include.append("deleted") + + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.service.list_containers_segment, + prefix=name_starts_with, + include=include, + timeout=timeout, + **kwargs) + return ItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=ContainerPropertiesPaged + ) + + @distributed_trace + def find_blobs_by_tags(self, filter_expression, **kwargs): + # type: (str, **Any) -> ItemPaged[FilteredBlob] + """The Filter Blobs operation enables callers to list blobs across all + containers whose tags match a given search expression. Filter blobs + searches across all containers within a storage account but can be + scoped within the expression to a single container. + + :param str filter_expression: + The expression to find blobs whose tags matches the specified condition. + eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" + To specify a container, eg. "@container='containerName' and \"Name\"='C'" + :keyword int results_per_page: + The max result per page when paginating. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob] + """ + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.service.filter_blobs, + where=filter_expression, + timeout=timeout, + **kwargs) + return ItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=FilteredBlobPaged) + + @distributed_trace + def create_container( + self, name, # type: str + metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[Union[PublicAccess, str]] + **kwargs + ): + # type: (...) -> ContainerClient + """Creates a new container under the specified account. + + If the container with the same name already exists, a ResourceExistsError will + be raised. This method returns a client with which to interact with the newly + created container. + + :param str name: The name of the container to create. + :param metadata: + A dict with name-value pairs to associate with the + container as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + Possible values include: 'container', 'blob'. + :type public_access: str or ~azure.storage.blob.PublicAccess + :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.blob.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START bsc_create_container] + :end-before: [END bsc_create_container] + :language: python + :dedent: 12 + :caption: Creating a container in the blob service. 
+ """ + container = self.get_container_client(name) + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + container.create_container( + metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) + return container + + @distributed_trace + def delete_container( + self, container, # type: Union[ContainerProperties, str] + lease=None, # type: Optional[Union[BlobLeaseClient, str]] + **kwargs + ): + # type: (...) -> None + """Marks the specified container for deletion. + + The container and any blobs contained within it are later deleted during garbage collection. + If the container is not found, a ResourceNotFoundError will be raised. + + :param container: + The container to delete. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START bsc_delete_container] + :end-before: [END bsc_delete_container] + :language: python + :dedent: 12 + :caption: Deleting a container in the blob service. + """ + container = self.get_container_client(container) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + container.delete_container( # type: ignore + lease=lease, + timeout=timeout, + **kwargs) + + @distributed_trace + def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): + # type: (str, str, str, **Any) -> ContainerClient + """Restores soft-deleted container. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :param str deleted_container_name: + Specifies the name of the deleted container to restore. + :param str deleted_container_version: + Specifies the version of the deleted container to restore. + :keyword str new_name: + The new name for the deleted container to be restored to. 
+            If not specified, deleted_container_name will be used as the restored container name.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        new_name = kwargs.pop('new_name', None)
+        container = self.get_container_client(new_name or deleted_container_name)
+        try:
+            container._client.container.restore(deleted_container_name=deleted_container_name,  # pylint: disable = protected-access
+                                                deleted_container_version=deleted_container_version,
+                                                timeout=kwargs.pop('timeout', None), **kwargs)
+            return container
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def get_container_client(self, container):
+        # type: (Union[ContainerProperties, str]) -> ContainerClient
+        """Get a client to interact with the specified container.
+
+        The container need not already exist.
+
+        :param container:
+            The container. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :returns: A ContainerClient.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_get_container_client]
+                :end-before: [END bsc_get_container_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the container client to interact with a specific container.
+        """
+        try:
+            container_name = container.name
+        except AttributeError:
+            container_name = container
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return ContainerClient(
+            self.url, container_name=container_name,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
+
+    def get_blob_client(
+            self, container,  # type: Union[ContainerProperties, str]
+            blob,  # type: Union[BlobProperties, str]
+            snapshot=None  # type: Optional[Union[Dict[str, Any], str]]
+    ):
+        # type: (...) -> BlobClient
+        """Get a client to interact with the specified blob.
+
+        The blob need not already exist.
+
+        :param container:
+            The container that the blob is in. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :param blob:
+            The blob with which to interact. This can either be the name of the blob,
+            or an instance of BlobProperties.
+        :type blob: str or ~azure.storage.blob.BlobProperties
+        :param snapshot:
+            The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
+            or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`.
+        :type snapshot: str or dict(str, Any)
+        :returns: A BlobClient.
+        :rtype: ~azure.storage.blob.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_get_blob_client]
+                :end-before: [END bsc_get_blob_client]
+                :language: python
+                :dedent: 12
+                :caption: Getting the blob client to interact with a specific blob.
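+
+        .. admonition:: Example:
+
+            A minimal sketch (an editor's illustration, not taken from the package samples);
+            it assumes an authenticated ``blob_service_client`` and an existing blob::
+
+                blob_client = blob_service_client.get_blob_client('mycontainer', 'myblob.txt')
+                print(blob_client.get_blob_properties().size)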
+ """ + try: + container_name = container.name + except AttributeError: + container_name = container + try: + blob_name = blob.name + except AttributeError: + blob_name = blob + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( # type: ignore + self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_container_client.py b/azure/multiapi/storagev2/blob/v2020_02_10/_container_client.py new file mode 100644 index 0000000..ba327c2 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_container_client.py @@ -0,0 +1,1448 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator, + TYPE_CHECKING +) + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six + +from azure.core import MatchConditions +from azure.core.paging import ItemPaged +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import HttpRequest + +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, serialize_iso +from ._shared.response_handlers import ( + process_storage_error, + return_response_headers, + return_headers_and_deserialized) +from ._generated import AzureBlobStorage, VERSION +from ._generated.models import ( + StorageErrorException, + SignedIdentifier) +from ._deserialize import deserialize_container_properties +from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions +from ._models import ( # pylint: disable=unused-import + ContainerProperties, + BlobProperties, + BlobType) +from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged +from ._lease import BlobLeaseClient +from ._blob_client import BlobClient + +if TYPE_CHECKING: + from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports + from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports + from datetime import datetime + from ._models import ( # pylint: disable=unused-import + PublicAccess, + AccessPolicy, + ContentSettings, + StandardBlobTier, + PremiumPageBlobTier) + + +def _get_blob_name(blob): + """Return the blob name. 
+ + :param blob: A blob string or BlobProperties + :rtype: str + """ + try: + return blob.get('name') + except AttributeError: + return blob + + +class ContainerClient(StorageAccountHostsMixin): + """A client to interact with a specific container, although that container + may not yet exist. + + For operations relating to a specific blob within this container, a blob client can be + retrieved using the :func:`~get_blob_client` function. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the container, + use the :func:`from_container_url` classmethod. + :param container_name: + The name of the container for the blob. + :type container_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START create_container_client_from_service] + :end-before: [END create_container_client_from_service] + :language: python + :dedent: 8 + :caption: Get a ContainerClient from an existing BlobServiceClient. + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START create_container_client_sasurl] + :end-before: [END create_container_client_sasurl] + :language: python + :dedent: 8 + :caption: Creating the container client directly. + """ + def __init__( + self, account_url, # type: str + container_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Container URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not container_name: + raise ValueError("Please specify a container name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + _, sas_token = parse_query(parsed_url.query) + self.container_name = container_name + self._query_str, credential = self._format_query_string(sas_token, credential) + super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) + self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + + def _format_url(self, hostname): + container_name = self.container_name + if isinstance(container_name, six.text_type): + container_name = container_name.encode('UTF-8') + return "{}://{}/{}{}".format( + self.scheme, + hostname, + quote(container_name), + self._query_str) + + @classmethod + def from_container_url(cls, container_url, credential=None, **kwargs): + # type: (str, Optional[Any], Any) -> ContainerClient + """Create ContainerClient from a container url. + + :param str container_url: + The full endpoint URL to the Container, including SAS token if used. This could be + either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. + :type container_url: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. + Credentials provided here will take precedence over those in the connection string. + :returns: A container client. + :rtype: ~azure.storage.blob.ContainerClient + """ + try: + if not container_url.lower().startswith('http'): + container_url = "https://" + container_url + except AttributeError: + raise ValueError("Container URL must be a string.") + parsed_url = urlparse(container_url.rstrip('/')) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(container_url)) + + container_path = parsed_url.path.lstrip('/').split('/') + account_path = "" + if len(container_path) > 1: + account_path = "/" + "/".join(container_path[:-1]) + account_url = "{}://{}{}?{}".format( + parsed_url.scheme, + parsed_url.netloc.rstrip('/'), + account_path, + parsed_url.query) + container_name = unquote(container_path[-1]) + if not container_name: + raise ValueError("Invalid URL. Please provide a URL with a valid container name") + return cls(account_url, container_name=container_name, credential=credential, **kwargs) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + container_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> ContainerClient + """Create ContainerClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param container_name: + The container name for the blob. + :type container_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. 
The value can be a SAS token string, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+ :returns: A container client.
+ :rtype: ~azure.storage.blob.ContainerClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_authentication.py
+ :start-after: [START auth_from_connection_string_container]
+ :end-before: [END auth_from_connection_string_container]
+ :language: python
+ :dedent: 8
+ :caption: Creating the ContainerClient from a connection string.
+ """
+ account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+ if 'secondary_hostname' not in kwargs:
+ kwargs['secondary_hostname'] = secondary
+ return cls(
+ account_url, container_name=container_name, credential=credential, **kwargs)
+
+ @distributed_trace
+ def create_container(self, metadata=None, public_access=None, **kwargs):
+ # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None
+ """
+ Creates a new container under the specified account. If the container
+ with the same name already exists, the operation fails.
+
+ :param metadata:
+ A dict with name-value pairs to associate with the
+ container as metadata. Example: {'Category': 'test'}
+ :type metadata: dict[str, str]
+ :param ~azure.storage.blob.PublicAccess public_access:
+ Possible values include: 'container', 'blob'.
+ :keyword container_encryption_scope:
+ Specifies the default encryption scope to set on the container and use for
+ all future writes.
+
+ .. versionadded:: 12.2.0
+
+ :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START create_container]
+ :end-before: [END create_container]
+ :language: python
+ :dedent: 12
+ :caption: Creating a container to store blobs.
+ """
+ headers = kwargs.pop('headers', {})
+ timeout = kwargs.pop('timeout', None)
+ headers.update(add_metadata_headers(metadata)) # type: ignore
+ container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
+ try:
+ return self._client.container.create( # type: ignore
+ timeout=timeout,
+ access=public_access,
+ container_cpk_scope_info=container_cpk_scope_info,
+ cls=return_response_headers,
+ headers=headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def delete_container(
+ self, **kwargs):
+ # type: (Any) -> None
+ """
+ Marks the specified container for deletion. The container and any blobs
+ contained within it are later deleted during garbage collection.
+
+ :keyword lease:
+ If specified, delete_container only succeeds if the
+ container's lease is active and matches this ID.
+ Required if the container has an active lease.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value.
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START delete_container] + :end-before: [END delete_container] + :language: python + :dedent: 12 + :caption: Delete a container. + """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + self._client.container.delete( + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs): + # type: (...) -> BlobLeaseClient + """ + Requests a new lease. If the container does not have an active lease, + the Blob service creates a lease on the container and returns a new + lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.blob.BlobLeaseClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START acquire_lease_on_container] + :end-before: [END acquire_lease_on_container] + :language: python + :dedent: 8 + :caption: Acquiring a lease on the container. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) + return lease + + @distributed_trace + def get_account_information(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def get_container_properties(self, **kwargs): + # type: (Any) -> ContainerProperties + """Returns all user-defined metadata and system properties for the specified + container. The data returned does not include the container's list of blobs. + + :keyword lease: + If specified, get_container_properties only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Properties for the specified container within a container object. + :rtype: ~azure.storage.blob.ContainerProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START get_container_properties] + :end-before: [END get_container_properties] + :language: python + :dedent: 12 + :caption: Getting properties on the container. + """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response = self._client.container.get_properties( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=deserialize_container_properties, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + response.name = self.container_name + return response # type: ignore + + @distributed_trace + def set_container_metadata( # type: ignore + self, metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Sets one or more user-defined name-value pairs for the specified + container. Each call to this operation replaces all existing metadata + attached to the container. To remove all metadata from the container, + call this operation with no metadata dict. + + :param metadata: + A dict containing name-value pairs to associate with the container as + metadata. Example: {'category':'test'} + :type metadata: dict[str, str] + :keyword lease: + If specified, set_container_metadata only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Container-updated property dict (Etag and last modified). + :rtype: dict[str, str or datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START set_container_metadata] + :end-before: [END set_container_metadata] + :language: python + :dedent: 12 + :caption: Setting metadata on the container. + """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + return self._client.container.set_metadata( # type: ignore + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def get_container_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the specified container. + The permissions indicate whether container data may be accessed publicly. + + :keyword lease: + If specified, get_container_access_policy only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START get_container_access_policy] + :end-before: [END get_container_access_policy] + :language: python + :dedent: 12 + :caption: Getting the access policy on the container. + """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = self._client.container.get_access_policy( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=return_headers_and_deserialized, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return { + 'public_access': response.get('blob_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace + def set_container_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified container or stored access + policies that may be used with Shared Access Signatures. 
The permissions + indicate whether blobs in a container may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the container. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. + :keyword ~datetime.datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Container-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START set_container_access_policy] + :end-before: [END set_container_access_policy] + :language: python + :dedent: 12 + :caption: Setting access policy on the container. + """ + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore + signed_identifiers = identifiers # type: ignore + lease = kwargs.pop('lease', None) + mod_conditions = get_modify_conditions(kwargs) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + return self._client.container.set_access_policy( + container_acl=signed_identifiers or None, + timeout=timeout, + access=public_access, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_blobs(self, name_starts_with=None, include=None, **kwargs): + # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] or str include: + Specifies one or more additional datasets to include in the response. 
+ Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START list_blobs_in_container] + :end-before: [END list_blobs_in_container] + :language: python + :dedent: 8 + :caption: List the blobs in the container. + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_flat_segment, + include=include, + timeout=timeout, + **kwargs) + return ItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=BlobPropertiesPaged) + + @distributed_trace + def walk_blobs( + self, name_starts_with=None, # type: Optional[str] + include=None, # type: Optional[Any] + delimiter="/", # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> ItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. This operation will list blobs in accordance with a hierarchy, + as delimited by the specified delimiter character. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. + :param str delimiter: + When the request includes this parameter, the operation returns a BlobPrefix + element in the response body that acts as a placeholder for all blobs whose + names begin with the same substring up to the appearance of the delimiter + character. The delimiter may be a single character or a string. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_hierarchy_segment, + delimiter=delimiter, + include=include, + timeout=timeout, + **kwargs) + return BlobPrefix( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + delimiter=delimiter) + + @distributed_trace + def upload_blob( + self, name, # type: Union[str, BlobProperties] + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> BlobClient + """Creates a new blob from a data source with automatic chunking. + + :param name: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type name: str or ~azure.storage.blob.BlobProperties + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. 
This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If set overwrite=True, then the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. 
This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :returns: A BlobClient to interact with the newly uploaded blob. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START upload_blob_to_container] + :end-before: [END upload_blob_to_container] + :language: python + :dedent: 8 + :caption: Upload blob to the container. + """ + blob = self.get_blob_client(name) + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + blob.upload_blob( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + timeout=timeout, + encoding=encoding, + **kwargs + ) + return blob + + @distributed_trace + def delete_blob( + self, blob, # type: Union[str, BlobProperties] + delete_snapshots=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> None + """Marks the specified blob or snapshot for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot + and retains the blob or snapshot for specified number of days. + After specified number of days, blob's data is removed from the service during garbage collection. + Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` + option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blobs snapshots. 
+ - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + blob_client = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + blob_client.delete_blob( # type: ignore + delete_snapshots=delete_snapshots, + timeout=timeout, + **kwargs) + + @distributed_trace + def download_blob(self, blob, offset=None, length=None, **kwargs): + # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
+ :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.blob.StorageStreamDownloader + """ + blob_client = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + return blob_client.download_blob(offset=offset, length=length, **kwargs) + + def _generate_delete_blobs_subrequest_options( + self, snapshot=None, + delete_snapshots=None, + lease_access_conditions=None, + modified_access_conditions=None, + **kwargs + ): + """This code is a copy from _generated. + + Once Autorest is able to provide request preparation this code should be removed. 
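+
+ As a rough illustration only (all values below are hypothetical), the query
+ and header parameters assembled here end up on one batch subrequest such as::
+
+ DELETE /mycontainer/myblob?timeout=30 HTTP/1.1
+ x-ms-lease-id: 00000000-1111-2222-3333-444444444444
+ If-Unmodified-Since: Tue, 01 Sep 2020 00:00:00 GMT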
+ """ + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct parameters + timeout = kwargs.pop('timeout', None) + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access + if timeout is not None: + query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access + + # Construct headers + header_parameters = {} + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access + "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access + "lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access + "if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access + "if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access + "if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._client._serialize.header( # pylint: disable=protected-access + "if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access + + return query_parameters, header_parameters + + def _generate_delete_blobs_options(self, + *blobs, # type: List[Union[str, BlobProperties, dict]] + **kwargs + ): + timeout = kwargs.pop('timeout', None) + raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) + delete_snapshots = kwargs.pop('delete_snapshots', None) + if_modified_since = kwargs.pop('if_modified_since', None) + if_unmodified_since = kwargs.pop('if_unmodified_since', None) + if_tags_match_condition = kwargs.pop('if_tags_match_condition', None) + kwargs.update({'raise_on_any_failure': raise_on_any_failure, + 'sas': self._query_str.replace('?', '&'), + 'timeout': '&timeout=' + str(timeout) if timeout else "" + }) + + reqs = [] + for blob in blobs: + blob_name = _get_blob_name(blob) + container_name = self.container_name + + try: + options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access + snapshot=blob.get('snapshot'), + delete_snapshots=delete_snapshots or blob.get('delete_snapshots'), + lease=blob.get('lease_id'), + 
if_modified_since=if_modified_since or blob.get('if_modified_since'),
+ if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'),
+ etag=blob.get('etag'),
+ if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'),
+ match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag')
+ else None,
+ timeout=blob.get('timeout'),
+ )
+ except AttributeError:
+ options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access
+ delete_snapshots=delete_snapshots,
+ if_modified_since=if_modified_since,
+ if_unmodified_since=if_unmodified_since,
+ if_tags_match_condition=if_tags_match_condition
+ )
+
+ query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options)
+
+ req = HttpRequest(
+ "DELETE",
+ "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str),
+ headers=header_parameters
+ )
+ req.format_parameters(query_parameters)
+ reqs.append(req)
+
+ return reqs, kwargs
+
+ @distributed_trace
+ def delete_blobs(self, *blobs, **kwargs):
+ # type: (...) -> Iterator[HttpResponse]
+ """Marks the specified blobs or snapshots for deletion.
+
+ The blobs are later deleted during garbage collection.
+ Note that in order to delete blobs, you must delete all of their
+ snapshots. You can delete both at the same time with the delete_blobs operation.
+
+ If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
+ and retains the blobs or snapshots for the specified number of days.
+ After the specified number of days, the blobs' data is removed from the service during garbage collection.
+ Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` option.
+ Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()`.
+
+ :param blobs:
+ The blobs to delete. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When the blob type is dict, here is a list of the supported keys and value rules.
+
+ blob name:
+ key: 'name', value type: str
+ snapshot you want to delete:
+ key: 'snapshot', value type: str
+ whether to delete snapshots when deleting the blob:
+ key: 'delete_snapshots', value: 'include' or 'only'
+ whether the blob has been modified:
+ key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+ etag:
+ key: 'etag', value type: str
+ whether to match the etag:
+ key: 'match_condition', value type: MatchConditions
+ tags match condition:
+ key: 'if_tags_match_condition', value type: str
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword str delete_snapshots:
+ Required if a blob has associated snapshots. Values include:
+ - "only": Deletes only the blob's snapshots.
+ - "include": Deletes the blob along with all snapshots.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value.
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword bool raise_on_any_failure: + This is a boolean param which defaults to True. When this is set, an exception + is raised even if there is a single operation failure. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: An iterator of responses, one for each blob in order + :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START delete_multiple_blobs] + :end-before: [END delete_multiple_blobs] + :language: python + :dedent: 8 + :caption: Deleting multiple blobs. + """ + if len(blobs) == 0: + return iter(list()) + + reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) + + return self._batch_send(*reqs, **options) + + def _generate_set_tiers_subrequest_options( + self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs + ): + """This code is a copy from _generated. + + Once Autorest is able to provide request preparation this code should be removed. + """ + if not tier: + raise ValueError("A blob tier must be specified") + if snapshot and version_id: + raise ValueError("Snapshot and version_id cannot be set at the same time") + if_tags = kwargs.pop('if_tags', None) + + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "tier" + timeout = kwargs.pop('timeout', None) + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access + if version_id is not None: + query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access + if timeout is not None: + query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access + query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call + + # Construct headers + header_parameters = {} + header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access + "rehydrate_priority", rehydrate_priority, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access + + return query_parameters, header_parameters + + def _generate_set_tiers_options(self, + blob_tier, # type: Optional[Union[str, StandardBlobTier, 
PremiumPageBlobTier]]
+ *blobs, # type: List[Union[str, BlobProperties, dict]]
+ **kwargs
+ ):
+ timeout = kwargs.pop('timeout', None)
+ raise_on_any_failure = kwargs.pop('raise_on_any_failure', True)
+ rehydrate_priority = kwargs.pop('rehydrate_priority', None)
+ if_tags = kwargs.pop('if_tags_match_condition', None)
+ kwargs.update({'raise_on_any_failure': raise_on_any_failure,
+ 'sas': self._query_str.replace('?', '&'),
+ 'timeout': '&timeout=' + str(timeout) if timeout else ""
+ })
+
+ reqs = []
+ for blob in blobs:
+ blob_name = _get_blob_name(blob)
+ container_name = self.container_name
+
+ try:
+ tier = blob_tier or blob.get('blob_tier')
+ query_parameters, header_parameters = self._generate_set_tiers_subrequest_options(
+ tier=tier,
+ snapshot=blob.get('snapshot'),
+ version_id=blob.get('version_id'),
+ rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'),
+ lease_access_conditions=blob.get('lease_id'),
+ if_tags=if_tags or blob.get('if_tags_match_condition'),
+ timeout=timeout or blob.get('timeout')
+ )
+ except AttributeError:
+ query_parameters, header_parameters = self._generate_set_tiers_subrequest_options(
+ blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags)
+
+ req = HttpRequest(
+ "PUT",
+ "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str),
+ headers=header_parameters
+ )
+ req.format_parameters(query_parameters)
+ reqs.append(req)
+
+ return reqs, kwargs
+
+ @distributed_trace
+ def set_standard_blob_tier_blobs(
+ self,
+ standard_blob_tier, # type: Optional[Union[str, StandardBlobTier]]
+ *blobs, # type: List[Union[str, BlobProperties, dict]]
+ **kwargs
+ ):
+ # type: (...) -> Iterator[HttpResponse]
+ """This operation sets the tier on block blobs.
+
+ A block blob's tier determines Hot/Cool/Archive storage type.
+ This operation does not update the blob's ETag.
+
+ :param standard_blob_tier:
+ Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool',
+ 'Archive'. The hot tier is optimized for storing data that is accessed
+ frequently. The cool storage tier is optimized for storing data that
+ is infrequently accessed and stored for at least a month. The archive
+ tier is optimized for storing data that is rarely accessed and stored
+ for at least six months with flexible latency requirements.
+
+ .. note::
+ If you want to set a different tier on different blobs, please set this positional parameter to None.
+ The blob tier on each BlobProperties will then be used.
+
+ :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+ :param blobs:
+ The blobs with which to interact. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When the blob type is dict, here is a list of the supported keys and value rules.
+
+ blob name:
+ key: 'name', value type: str
+ standard blob tier:
+ key: 'blob_tier', value type: StandardBlobTier
+ rehydrate priority:
+ key: 'rehydrate_priority', value type: RehydratePriority
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ snapshot:
+ key: "snapshot", value type: str
+ version id:
+ key: "version_id", value type: str
+ tags match condition:
+ key: 'if_tags_match_condition', value type: str
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+ Indicates the priority with which to rehydrate an archived blob.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True. When this is set, an exception
+ is raised even if there is a single operation failure.
+ :return: An iterator of responses, one for each blob in order
+ :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
+ """
+ reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs)
+
+ return self._batch_send(*reqs, **options)
+
+ @distributed_trace
+ def set_premium_page_blob_tier_blobs(
+ self,
+ premium_page_blob_tier, # type: Optional[Union[str, PremiumPageBlobTier]]
+ *blobs, # type: List[Union[str, BlobProperties, dict]]
+ **kwargs
+ ):
+ # type: (...) -> Iterator[HttpResponse]
+ """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts.
+
+ :param premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+
+ .. note::
+ If you want to set a different tier on different blobs, please set this positional parameter to None.
+ The blob tier on each BlobProperties will then be used.
+
+ :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+ :param blobs:
+ The blobs with which to interact. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When the blob type is dict, here is a list of the supported keys and value rules.
+
+ blob name:
+ key: 'name', value type: str
+ premium blob tier:
+ key: 'blob_tier', value type: PremiumPageBlobTier
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True. When this is set, an exception
+ is raised even if there is a single operation failure.
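+
+ .. admonition:: Example:
+
+ A hypothetical usage sketch (the container_client and blob names are invented)::
+
+ container_client.set_premium_page_blob_tier_blobs(
+ PremiumPageBlobTier.P10, "pageblob1", "pageblob2")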
+ :return: An iterator of responses, one for each blob in order + :rtype: iterator[~azure.core.pipeline.transport.HttpResponse] + """ + reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) + + return self._batch_send(*reqs, **options) + + def get_blob_client( + self, blob, # type: Union[str, BlobProperties] + snapshot=None # type: str + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param blob: + The blob with which to interact. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`~BlobClient.create_snapshot()`. + :returns: A BlobClient. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START get_blob_client] + :end-before: [END get_blob_client] + :language: python + :dedent: 8 + :caption: Get the blob client. + """ + blob_name = _get_blob_name(blob) + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( + self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_deserialize.py b/azure/multiapi/storagev2/blob/v2020_02_10/_deserialize.py new file mode 100644 index 0000000..159e0e6 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_deserialize.py @@ -0,0 +1,158 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+from typing import ( # pylint: disable=unused-import
+ Tuple, Dict, List,
+ TYPE_CHECKING
+)
+
+from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties
+from ._shared.models import get_enum_value
+
+from ._shared.response_handlers import deserialize_metadata
+from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \
+ StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule
+
+if TYPE_CHECKING:
+ from ._generated.models import PageList
+
+
+def deserialize_blob_properties(response, obj, headers):
+ blob_properties = BlobProperties(
+ metadata=deserialize_metadata(response, obj, headers),
+ object_replication_source_properties=deserialize_ors_policies(response.headers),
+ **headers
+ )
+ if 'Content-Range' in headers:
+ if 'x-ms-blob-content-md5' in headers:
+ blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
+ else:
+ blob_properties.content_settings.content_md5 = None
+ return blob_properties
+
+
+def deserialize_ors_policies(policy_dictionary):
+
+ if policy_dictionary is None:
+ return None
+ # For source blobs (blobs that have policy ids and rule ids applied to them),
+ # the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
+ # The value of this header is the status of the replication.
+ or_policy_status_headers = {key: val for key, val in policy_dictionary.items()
+ if 'or-' in key and key != 'x-ms-or-policy-id'}
+
+ parsed_result = {}
+
+ for key, val in or_policy_status_headers.items():
+ # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule
+ policy_and_rule_ids = key.split('or-')[1].split('_')
+ policy_id = policy_and_rule_ids[0]
+ rule_id = policy_and_rule_ids[1]
+
+ # If we are seeing this policy for the first time, create a new list to store rule_id -> result
+ parsed_result[policy_id] = parsed_result.get(policy_id) or list()
+ parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val))
+
+ result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()]
+
+ return result_list
+
+
+def deserialize_blob_stream(response, obj, headers):
+ blob_properties = deserialize_blob_properties(response, obj, headers)
+ obj.properties = blob_properties
+ return response.location_mode, obj
+
+
+def deserialize_container_properties(response, obj, headers):
+ metadata = deserialize_metadata(response, obj, headers)
+ container_properties = ContainerProperties(
+ metadata=metadata,
+ **headers
+ )
+ return container_properties
+
+
+def get_page_ranges_result(ranges):
+ # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ page_range = [] # type: ignore
+ clear_range = [] # type: List
+ if ranges.page_range:
+ page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore
+ if ranges.clear_range:
+ clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range]
+ return page_range, clear_range # type: ignore
+
+
+def service_stats_deserialize(generated):
+ """Deserialize a ServiceStats object into a dict.
+ """
+ return {
+ 'geo_replication': {
+ 'status': generated.geo_replication.status,
+ 'last_sync_time': generated.geo_replication.last_sync_time,
+ }
+ }
+
+
+def service_properties_deserialize(generated):
+ """Deserialize a ServiceProperties object into a dict.
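+
+ The keys of the returned dict mirror the generated model; the shape is
+ roughly the following (shown for illustration only, values are the
+ deserialized model types)::
+
+ {
+ 'analytics_logging': BlobAnalyticsLogging(...),
+ 'hour_metrics': Metrics(...),
+ 'minute_metrics': Metrics(...),
+ 'cors': [CorsRule(...)],
+ 'target_version': ...,
+ 'delete_retention_policy': RetentionPolicy(...),
+ 'static_website': StaticWebsite(...),
+ }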
+ """ + return { + 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access + 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access + 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access + 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access + 'target_version': generated.default_service_version, # pylint: disable=protected-access + 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access + 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access + } + + +def get_blob_properties_from_generated_code(generated): + blob = BlobProperties() + blob.name = generated.name + blob_type = get_enum_value(generated.properties.blob_type) + blob.blob_type = BlobType(blob_type) if blob_type else None + blob.etag = generated.properties.etag + blob.deleted = generated.deleted + blob.snapshot = generated.snapshot + blob.is_append_blob_sealed = generated.properties.is_sealed + blob.metadata = generated.metadata.additional_properties if generated.metadata else {} + blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None + blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access + blob.last_modified = generated.properties.last_modified + blob.creation_time = generated.properties.creation_time + blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access + blob.size = generated.properties.content_length + blob.page_blob_sequence_number = generated.properties.blob_sequence_number + blob.server_encrypted = generated.properties.server_encrypted + blob.encryption_scope = generated.properties.encryption_scope + blob.deleted_time = generated.properties.deleted_time + blob.remaining_retention_days = generated.properties.remaining_retention_days + blob.blob_tier = generated.properties.access_tier + blob.rehydrate_priority = generated.properties.rehydrate_priority + blob.blob_tier_inferred = generated.properties.access_tier_inferred + blob.archive_status = generated.properties.archive_status + blob.blob_tier_change_time = generated.properties.access_tier_change_time + blob.version_id = generated.version_id + blob.is_current_version = generated.is_current_version + blob.tag_count = generated.properties.tag_count + blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access + blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata) + blob.last_accessed_on = generated.properties.last_accessed_on + return blob + + +def parse_tags(generated_tags): + # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] + """Deserialize a list of BlobTag objects into a dict. 
+ """ + if generated_tags: + tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} + return tag_dict + return None diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_download.py b/azure/multiapi/storagev2/blob/v2020_02_10/_download.py new file mode 100644 index 0000000..46e59e5 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_download.py @@ -0,0 +1,580 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +import threading +import warnings +from io import BytesIO + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.common import with_current_context +from ._shared.encryption import decrypt_blob +from ._shared.request_handlers import validate_and_format_range_headers +from ._shared.response_handlers import process_storage_error, parse_length_from_content_range +from ._deserialize import get_page_ranges_result + + +def process_range_and_offset(start_range, end_range, length, encryption): + start_offset, end_offset = 0, 0 + if encryption.get("key") is not None or encryption.get("resolver") is not None: + if start_range is not None: + # Align the start of the range along a 16 byte block + start_offset = start_range % 16 + start_range -= start_offset + + # Include an extra 16 bytes for the IV if necessary + # Because of the previous offsetting, start_range will always + # be a multiple of 16. + if start_range > 0: + start_offset += 16 + start_range -= 16 + + if length is not None: + # Align the end of the range along a 16 byte block + end_offset = 15 - (end_range % 16) + end_range += end_offset + + return (start_range, end_range), (start_offset, end_offset) + + +def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + try: + content = b"".join(list(data)) + except Exception as error: + raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) + if content and encryption.get("key") is not None or encryption.get("resolver") is not None: + try: + return decrypt_blob( + encryption.get("required"), + encryption.get("key"), + encryption.get("resolver"), + content, + start_offset, + end_offset, + data.response.headers, + ) + except Exception as error: + raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) + return content + + +class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + client=None, + non_empty_ranges=None, + total_size=None, + chunk_size=None, + current_progress=None, + start_range=None, + end_range=None, + stream=None, + parallel=None, + validate_content=None, + encryption_options=None, + **kwargs + ): + self.client = client + self.non_empty_ranges = non_empty_ranges + + # Information on the download range/chunk size + self.chunk_size = chunk_size + self.total_size = total_size + self.start_index = start_range + self.end_index = end_range + + # The destination that we will write to + self.stream = stream + self.stream_lock = threading.Lock() if parallel else None + self.progress_lock = threading.Lock() if parallel else None + + # For a parallel download, the stream is always seekable, so we note down the current position + # in order to seek to the right 
place when out-of-order chunks come in + self.stream_start = stream.tell() if parallel else None + + # Download progress so far + self.progress_total = current_progress + + # Encryption + self.encryption_options = encryption_options + + # Parameters for each get operation + self.validate_content = validate_content + self.request_options = kwargs + + def _calculate_range(self, chunk_start): + if chunk_start + self.chunk_size > self.end_index: + chunk_end = self.end_index + else: + chunk_end = chunk_start + self.chunk_size + return chunk_start, chunk_end + + def get_chunk_offsets(self): + index = self.start_index + while index < self.end_index: + yield index + index += self.chunk_size + + def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + self._write_to_stream(chunk_data, chunk_start) + self._update_progress(length) + + def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return self._download_chunk(chunk_start, chunk_end - 1) + + def _update_progress(self, length): + if self.progress_lock: + with self.progress_lock: # pylint: disable=not-context-manager + self.progress_total += length + else: + self.progress_total += length + + def _write_to_stream(self, chunk_data, chunk_start): + if self.stream_lock: + with self.stream_lock: # pylint: disable=not-context-manager + self.stream.seek(self.stream_start + (chunk_start - self.start_index)) + self.stream.write(chunk_data) + else: + self.stream.write(chunk_data) + + def _do_optimize(self, given_range_start, given_range_end): + # If we have no page range list stored, then assume there's data everywhere for that page blob + # or it's a block blob or append blob + if self.non_empty_ranges is None: + return False + + for source_range in self.non_empty_ranges: + # Case 1: As the range list is sorted, if we've reached such a source_range + # we've checked all the appropriate source_range already and haven't found any overlapping. + # so the given range doesn't have any data and download optimization could be applied. + # given range: | | + # source range: | | + if given_range_end < source_range['start']: # pylint:disable=no-else-return + return True + # Case 2: the given range comes after source_range, continue checking. + # given range: | | + # source range: | | + elif source_range['end'] < given_range_start: + pass + # Case 3: source_range and given range overlap somehow, no need to optimize. + else: + return False + # Went through all src_ranges, but nothing overlapped. Optimization will be applied. + return True + + def _download_chunk(self, chunk_start, chunk_end): + download_range, offset = process_range_and_offset( + chunk_start, chunk_end, chunk_end, self.encryption_options + ) + + # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. + # Do optimize and create empty chunk locally if condition is met. 
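+        # Worked example (hypothetical values): for a sparse page blob where
+        # non_empty_ranges == [{'start': 0, 'end': 511}], a chunk covering
+        # bytes 1024-2047 overlaps no valid range, so _do_optimize returns
+        # True and the chunk is synthesized locally as zeros instead of
+        # issuing a ranged GET.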
+        if self._do_optimize(download_range[0], download_range[1]):
+            chunk_data = b"\x00" * self.chunk_size
+        else:
+            range_header, range_validation = validate_and_format_range_headers(
+                download_range[0],
+                download_range[1],
+                check_content_md5=self.validate_content
+            )
+
+            try:
+                _, response = self.client.download(
+                    range=range_header,
+                    range_get_content_md5=range_validation,
+                    validate_content=self.validate_content,
+                    data_stream_total=self.total_size,
+                    download_stream_current=self.progress_total,
+                    **self.request_options
+                )
+            except HttpResponseError as error:
+                process_storage_error(error)
+
+            chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
+
+            # This makes sure that if_match is set so that we can validate
+            # that subsequent downloads are to an unmodified blob
+            if self.request_options.get("modified_access_conditions"):
+                self.request_options["modified_access_conditions"].if_match = response.properties.etag
+
+        return chunk_data
+
+
+class _ChunkIterator(object):
+    """Iterator for chunks in blob download stream."""
+
+    def __init__(self, size, content, downloader):
+        self.size = size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks = None
+        self._complete = (size == 0)
+
+    def __len__(self):
+        return self.size
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        """Iterate through responses."""
+        if self._complete:
+            raise StopIteration("Download complete")
+        if not self._iter_downloader:
+            # If no iterator was supplied, the download completed with
+            # the initial GET, so we just return that data
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+        else:
+            chunk = next(self._iter_chunks)
+            self._current_content = self._iter_downloader.yield_chunk(chunk)
+
+        return self._current_content
+
+    next = __next__  # Python 2 compatibility.
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the blob being downloaded.
+    :ivar str container:
+        The name of the container where the blob is.
+    :ivar ~azure.storage.blob.BlobProperties properties:
+        The properties of the blob being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the blob.
+    """
+
+    def __init__(
+        self,
+        clients=None,
+        config=None,
+        start_range=None,
+        end_range=None,
+        validate_content=None,
+        encryption_options=None,
+        max_concurrency=1,
+        name=None,
+        container=None,
+        encoding=None,
+        **kwargs
+    ):
+        self.name = name
+        self.container = container
+        self.properties = None
+        self.size = None
+
+        self._clients = clients
+        self._config = config
+        self._start_range = start_range
+        self._end_range = end_range
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        self._validate_content = validate_content
+        self._encryption_options = encryption_options or {}
+        self._request_options = kwargs
+        self._location_mode = None
+        self._download_complete = False
+        self._current_content = None
+        self._file_size = None
+        self._non_empty_ranges = None
+        self._response = None
+
+        # The service only provides transactional MD5s for chunks under 4MB.
+ # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + self._first_get_size = ( + self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size + ) + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, initial_request_end, self._end_range, self._encryption_options + ) + + self._response = self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.container = self.container + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = "bytes {0}-{1}/{2}".format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = process_content( + self._response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + + def __len__(self): + return self.size + + def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content + ) + + try: + location_mode, response = self._clients.blob.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options + ) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. + self._location_mode = location_mode + + # Parse the total file size and adjust the download size if ranges + # were specified + self._file_size = parse_length_from_content_range(response.properties.content_range) + if self._end_range is not None: + # Use the end range index unless it is over the end of the file + self.size = min(self._file_size, self._end_range - self._start_range + 1) + elif self._start_range is not None: + self.size = self._file_size - self._start_range + else: + self.size = self._file_size + + except HttpResponseError as error: + if self._start_range is None and error.response.status_code == 416: + # Get range will fail on an empty file. If the user did not + # request a range, do a regular get request in order to get + # any properties. 
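+                # For example (with default transport settings, an assumption
+                # here): downloading an empty blob without an explicit range
+                # still sends a ranged GET for the first chunk, which the
+                # service rejects with 416 (Range Not Satisfiable); the
+                # unranged GET below succeeds and returns the properties of
+                # the zero-length blob.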
+                try:
+                    _, response = self._clients.blob.download(
+                        validate_content=self._validate_content,
+                        data_stream_total=0,
+                        download_stream_current=0,
+                        **self._request_options
+                    )
+                except HttpResponseError as error:
+                    process_storage_error(error)
+
+                # Set the download size to empty
+                self.size = 0
+                self._file_size = 0
+            else:
+                process_storage_error(error)
+
+        # get page ranges to optimize downloading sparse page blob
+        if response.properties.blob_type == 'PageBlob':
+            try:
+                page_ranges = self._clients.page_blob.get_page_ranges()
+                self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
+            # according to the REST API documentation:
+            # in a highly fragmented page blob with a large number of writes,
+            # a Get Page Ranges request can fail due to an internal server timeout.
+            # thus, if the page blob is not sparse, it's ok for it to fail
+            except HttpResponseError:
+                pass
+
+        # If the file is small, the download is complete at this point.
+        # If file size is large, download the rest of the file in chunks.
+        if response.properties.size != self.size:
+            # Lock on the etag. This can be overridden by the user by specifying '*'
+            if self._request_options.get("modified_access_conditions"):
+                if not self._request_options["modified_access_conditions"].if_match:
+                    self._request_options["modified_access_conditions"].if_match = response.properties.etag
+        else:
+            self._download_complete = True
+        return response
+
+    def chunks(self):
+        if self.size == 0 or self._download_complete:
+            iter_downloader = None
+        else:
+            data_end = self._file_size
+            if self._end_range is not None:
+                # Use the end range index unless it is over the end of the file
+                data_end = min(self._file_size, self._end_range + 1)
+            iter_downloader = _ChunkDownloader(
+                client=self._clients.blob,
+                non_empty_ranges=self._non_empty_ranges,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._first_get_size,
+                start_range=self._initial_range[1] + 1,  # start where the first download ended
+                end_range=data_end,
+                stream=None,
+                parallel=False,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                use_location=self._location_mode,
+                **self._request_options
+            )
+        return _ChunkIterator(
+            size=self.size,
+            content=self._current_content,
+            downloader=iter_downloader)
+
+    def readall(self):
+        """Download the contents of this blob.
+
+        This operation is blocking until all data is downloaded.
+
+        :rtype: bytes or str
+        """
+        stream = BytesIO()
+        self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)
+        return data
+
+    def content_as_bytes(self, max_concurrency=1):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        return self.readall()
+
+    def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """Download the contents of this blob, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding to decode the downloaded bytes. Default is UTF-8.
+ :rtype: str + """ + warnings.warn( + "content_as_text is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self._encoding = encoding + return self.readall() + + def readinto(self, stream): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + # The stream must be seekable if parallel download is required + parallel = self._max_concurrency > 1 + if parallel: + error_message = "Target stream handle must be seekable." + if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # Write the content to the user stream + stream.write(self._current_content) + if self._download_complete: + return self.size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + downloader = _ChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # Start where the first download ended + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + **self._request_options + ) + if parallel: + import concurrent.futures + executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency) + list(executor.map( + with_current_context(downloader.process_chunk), + downloader.get_chunk_offsets() + )) + else: + for chunk in downloader.get_chunk_offsets(): + downloader.process_chunk(chunk) + return self.size + + def download_to_stream(self, stream, max_concurrency=1): + """Download the contents of this blob to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The properties of the downloaded blob. + :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self.readinto(stream) + return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/__init__.py new file mode 100644 index 0000000..f5c8f4a --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/__init__.py @@ -0,0 +1,18 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# --------------------------------------------------------------------------
+
+from ._azure_blob_storage import AzureBlobStorage
+__all__ = ['AzureBlobStorage']
+
+from .version import VERSION
+
+__version__ = VERSION
+
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_azure_blob_storage.py
new file mode 100644
index 0000000..831f6ce
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_azure_blob_storage.py
@@ -0,0 +1,83 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core import PipelineClient
+from msrest import Serializer, Deserializer
+
+from ._configuration import AzureBlobStorageConfiguration
+from azure.core.exceptions import map_error
+from .operations import ServiceOperations
+from .operations import ContainerOperations
+from .operations import DirectoryOperations
+from .operations import BlobOperations
+from .operations import PageBlobOperations
+from .operations import AppendBlobOperations
+from .operations import BlockBlobOperations
+from . import models
+
+
+class AzureBlobStorage(object):
+    """AzureBlobStorage
+
+
+    :ivar service: Service operations
+    :vartype service: azure.storage.blob.operations.ServiceOperations
+    :ivar container: Container operations
+    :vartype container: azure.storage.blob.operations.ContainerOperations
+    :ivar directory: Directory operations
+    :vartype directory: azure.storage.blob.operations.DirectoryOperations
+    :ivar blob: Blob operations
+    :vartype blob: azure.storage.blob.operations.BlobOperations
+    :ivar page_blob: PageBlob operations
+    :vartype page_blob: azure.storage.blob.operations.PageBlobOperations
+    :ivar append_blob: AppendBlob operations
+    :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations
+    :ivar block_blob: BlockBlob operations
+    :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(self, url, **kwargs):
+
+        base_url = '{url}'
+        self._config = AzureBlobStorageConfiguration(url, **kwargs)
+        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self.api_version = '2020-02-10'
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.container = ContainerOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.directory = DirectoryOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.blob = BlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.page_blob = PageBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.append_blob = AppendBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.block_blob = BlockBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    def close(self):
+        self._client.close()
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+    def __exit__(self, *exc_details):
+        self._client.__exit__(*exc_details)
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_configuration.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_configuration.py
new file mode 100644
index 0000000..c8a1875
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_configuration.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+from .version import VERSION
+
+
+class AzureBlobStorageConfiguration(Configuration):
+    """Configuration for AzureBlobStorage
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    :ivar version: Specifies the version of the operation to use for this
+     request.
+ :type version: str + """ + + def __init__(self, url, **kwargs): + + if url is None: + raise ValueError("Parameter 'url' must not be None.") + + super(AzureBlobStorageConfiguration, self).__init__(**kwargs) + self._configure(**kwargs) + + self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION)) + self.generate_client_request_id = True + + self.url = url + self.version = "2020-02-10" + + def _configure(self, **kwargs): + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/__init__.py new file mode 100644 index 0000000..009c965 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/__init__.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from ._azure_blob_storage_async import AzureBlobStorage +__all__ = ['AzureBlobStorage'] diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_azure_blob_storage_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_azure_blob_storage_async.py new file mode 100644 index 0000000..367e296 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_azure_blob_storage_async.py @@ -0,0 +1,84 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core import AsyncPipelineClient +from msrest import Serializer, Deserializer + +from ._configuration_async import AzureBlobStorageConfiguration +from azure.core.exceptions import map_error +from .operations_async import ServiceOperations +from .operations_async import ContainerOperations +from .operations_async import DirectoryOperations +from .operations_async import BlobOperations +from .operations_async import PageBlobOperations +from .operations_async import AppendBlobOperations +from .operations_async import BlockBlobOperations +from .. 
import models
+
+
+class AzureBlobStorage(object):
+    """AzureBlobStorage
+
+
+    :ivar service: Service operations
+    :vartype service: azure.storage.blob.aio.operations_async.ServiceOperations
+    :ivar container: Container operations
+    :vartype container: azure.storage.blob.aio.operations_async.ContainerOperations
+    :ivar directory: Directory operations
+    :vartype directory: azure.storage.blob.aio.operations_async.DirectoryOperations
+    :ivar blob: Blob operations
+    :vartype blob: azure.storage.blob.aio.operations_async.BlobOperations
+    :ivar page_blob: PageBlob operations
+    :vartype page_blob: azure.storage.blob.aio.operations_async.PageBlobOperations
+    :ivar append_blob: AppendBlob operations
+    :vartype append_blob: azure.storage.blob.aio.operations_async.AppendBlobOperations
+    :ivar block_blob: BlockBlob operations
+    :vartype block_blob: azure.storage.blob.aio.operations_async.BlockBlobOperations
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(
+            self, url, **kwargs):
+
+        base_url = '{url}'
+        self._config = AzureBlobStorageConfiguration(url, **kwargs)
+        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self.api_version = '2020-02-10'
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.container = ContainerOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.directory = DirectoryOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.blob = BlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.page_blob = PageBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.append_blob = AppendBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.block_blob = BlockBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    async def close(self):
+        await self._client.close()
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+    async def __aexit__(self, *exc_details):
+        await self._client.__aexit__(*exc_details)
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_configuration_async.py
new file mode 100644
index 0000000..609cb82
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_configuration_async.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+from ..version import VERSION
+
+
+class AzureBlobStorageConfiguration(Configuration):
+    """Configuration for AzureBlobStorage
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    :ivar version: Specifies the version of the operation to use for this
+     request.
+    :type version: str
+    """
+
+    def __init__(self, url, **kwargs):
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        super(AzureBlobStorageConfiguration, self).__init__(**kwargs)
+        self._configure(**kwargs)
+
+        self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION))
+        self.generate_client_request_id = True
+        self.accept_language = None
+
+        self.url = url
+        self.version = "2020-02-10"
+
+    def _configure(self, **kwargs):
+        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/__init__.py
new file mode 100644
index 0000000..dec0519
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/__init__.py
@@ -0,0 +1,28 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations_async import ServiceOperations
+from ._container_operations_async import ContainerOperations
+from ._directory_operations_async import DirectoryOperations
+from ._blob_operations_async import BlobOperations
+from ._page_blob_operations_async import PageBlobOperations
+from ._append_blob_operations_async import AppendBlobOperations
+from ._block_blob_operations_async import BlockBlobOperations
+
+__all__ = [
+    'ServiceOperations',
+    'ContainerOperations',
+    'DirectoryOperations',
+    'BlobOperations',
+    'PageBlobOperations',
+    'AppendBlobOperations',
+    'BlockBlobOperations',
+]
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_append_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_append_blob_operations_async.py
new file mode 100644
index 0000000..ea79827
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_append_blob_operations_async.py
@@ -0,0 +1,694 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class AppendBlobOperations:
+    """AppendBlobOperations async operations.
+
+    You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.x_ms_blob_type = "AppendBlob"
+
+    async def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """The Create Append Blob operation creates a new append blob.
+
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair
+         associated with the blob. If no name-value pairs are specified, the
+         operation will copy the metadata from the source blob or file to the
+         destination blob. If one or more name-value pairs are specified, the
+         destination blob is created with the specified metadata, and metadata
+         is not copied from the source blob or file. Note that beginning with
+         version 2009-09-19, metadata names must adhere to the naming rules for
+         C# identifiers. 
See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str + :param blob_http_headers: Additional parameters for the operation + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + blob_content_type = None + if blob_http_headers is not None: + blob_content_type = blob_http_headers.blob_content_type + blob_content_encoding = None + if blob_http_headers is not None: + blob_content_encoding = blob_http_headers.blob_content_encoding + blob_content_language = None + if blob_http_headers is not None: + blob_content_language = blob_http_headers.blob_content_language + blob_content_md5 = None + if blob_http_headers is not None: + blob_content_md5 = blob_http_headers.blob_content_md5 + blob_cache_control = None + if blob_http_headers is not None: + blob_cache_control = blob_http_headers.blob_cache_control + blob_content_disposition = None + if blob_http_headers is not None: + blob_content_disposition = blob_http_headers.blob_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, 
**path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') + if blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') + if blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') + if blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') + if blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') + if blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') + if blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    create.metadata = {'url': '/{containerName}/{blob}'}
+
+    async def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """The Append Block operation commits a new block of data to the end of an
+        existing append blob. The Append Block operation is permitted only if
+        the blob was created with x-ms-blob-type set to AppendBlob. Append
+        Block is supported only on version 2015-02-21 or later.
+
+        :param body: Initial data
+        :type body: Generator
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for
+         the body, to be validated by the service.
+        :type transactional_content_md5: bytearray
+        :param transactional_content_crc64: Specify the transactional crc64
+         for the body, to be validated by the service.
+        :type transactional_content_crc64: bytearray
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Additional parameters for + the operation + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + max_size = None + if append_position_access_conditions is not None: + max_size = append_position_access_conditions.max_size + append_position = None + if append_position_access_conditions is not None: + append_position = append_position_access_conditions.append_position + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "appendblock" + + # Construct URL + url = self.append_block.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 
'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') + if append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 
'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    append_block.metadata = {'url': '/{containerName}/{blob}'}
+
+    async def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+        """The Append Block operation commits a new block of data to the end of an
+        existing append blob where the contents are read from a source url. The
+        Append Block operation is permitted only if the blob was created with
+        x-ms-blob-type set to AppendBlob. Append Block is supported only on
+        version 2015-02-21 or later.
+
+        :param source_url: Specify a URL to the copy source.
+        :type source_url: str
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param source_range: Bytes of source data in the specified range.
+        :type source_range: str
+        :param source_content_md5: Specify the md5 calculated for the range of
+         bytes that must be read from the copy source.
+        :type source_content_md5: bytearray
+        :param source_contentcrc64: Specify the crc64 calculated for the range
+         of bytes that must be read from the copy source.
+        :type source_contentcrc64: bytearray
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for
+         the body, to be validated by the service.
+        :type transactional_content_md5: bytearray
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Additional parameters for + the operation + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + max_size = None + if append_position_access_conditions is not None: + max_size = append_position_access_conditions.max_size + append_position = None + if append_position_access_conditions is not None: + append_position = append_position_access_conditions.append_position + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + + comp = "appendblock" + + # Construct URL + url = self.append_block_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = 
self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') + if append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if 
source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} + + async def seal(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, append_position_access_conditions=None, *, cls=None, **kwargs): + """The Seal operation seals the Append Blob to make it read-only. Seal is + supported only on version 2019-12-12 or later. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
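The server-side variant just completed never moves the block through the client; the service reads the range directly from the source URL. A hedged sketch of driving append_block_from_url through the convenience layer, assuming source_url is a readable (for example SAS-signed) blob URL supplied by the caller:

from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

async def append_remote_range(dest: BlobClient, source_url: str) -> None:
    # Maps to append_block_from_url above: x-ms-copy-source carries the URL,
    # x-ms-source-range selects the bytes; no payload passes through the client.
    await dest.append_block_from_url(source_url, source_offset=0, source_length=512)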
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param append_position_access_conditions: Additional parameters for + the operation + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + append_position = None + if append_position_access_conditions is not None: + append_position = append_position_access_conditions.append_position + + comp = "seal" + + # Construct URL + url = self.seal.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + seal.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_blob_operations_async.py new file mode 100644 index 0000000..54d6dab --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_blob_operations_async.py @@ -0,0 +1,3067 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class BlobOperations: + """BlobOperations async operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar x_ms_requires_sync: . Constant value: "true". + :ivar x_ms_copy_action: . Constant value: "abort". + :ivar restype: . Constant value: "account". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.x_ms_requires_sync = "true" + self.x_ms_copy_action = "abort" + self.restype = "account" + + async def download(self, snapshot=None, version_id=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Download operation reads or downloads a blob from the system, + including its metadata and properties. You can also call Download to + read a snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on.
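The seal operation completed above is surfaced on the convenience client as seal_append_blob in the matching public azure-storage-blob API. A minimal sketch under that assumption; the is_append_blob_sealed property, if present as in the public API, mirrors the x-ms-blob-sealed header deserialized above:

from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

async def freeze(blob: BlobClient) -> None:
    await blob.seal_append_blob()        # further append_block calls now fail
    props = await blob.get_blob_properties()
    assert props.is_append_blob_sealed   # deserialized from x-ms-blob-sealed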
It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param range: Return only the bytes of the blob in the specified + range. + :type range: str + :param range_get_content_md5: When set to true and specified together + with the Range, the service returns the MD5 hash for the range, as + long as the range is less than or equal to 4 MB in size. + :type range_get_content_md5: bool + :param range_get_content_crc64: When set to true and specified + together with the Range, the service returns the CRC64 hash for the + range, as long as the range is less than or equal to 4 MB in size. + :type range_get_content_crc64: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.download.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + if range is not None: + 
header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if range_get_content_crc64 is not None: + header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + await response.load_body() + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': 
self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': 
self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + download.metadata = {'url': '/{containerName}/{blob}'} + + async def get_properties(self, snapshot=None, version_id=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Get Properties operation returns all 
user-defined metadata, + standard HTTP properties, and system properties for the blob. It does + not return the content of the blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')), + 'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 
'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')), + 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')), + 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')), + 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'x-ms-is-current-version': self._deserialize('bool', response.headers.get('x-ms-is-current-version')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-rehydrate-priority': self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')), + 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{containerName}/{blob}'} + + async def delete(self, snapshot=None, version_id=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """If the storage account's soft delete feature is disabled then, when a + blob is deleted, it is permanently removed from the storage account. If + the storage account's soft delete feature is enabled, then, when a blob + is deleted, it is marked for deletion and becomes inaccessible + immediately. 
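Taken together, the download and get_properties operations above split the work between a streamed GET and a header-only HEAD. A minimal sketch combining them, assuming a connected async BlobClient:

from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

async def fetch(blob: BlobClient) -> bytes:
    # HEAD first: get_blob_properties surfaces the headers deserialized above.
    props = await blob.get_blob_properties()
    # Then a ranged GET (status 206) for the full, known length.
    downloader = await blob.download_blob(offset=0, length=props.size)
    return await downloader.readall()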
However, the blob service retains the blob or snapshot for + the number of days specified by the DeleteRetentionPolicy section of + [Storage service properties](Set-Blob-Service-Properties.md). After + the specified number of days has passed, the blob's data is permanently + removed from the storage account. Note that you continue to be charged + for the soft-deleted blob's storage until it is permanently removed. + Use the List Blobs API and specify the "include=deleted" query + parameter to discover which blobs and snapshots have been soft deleted. + You can then use the Undelete Blob API to restore a soft-deleted blob. + All other operations on a soft-deleted blob or snapshot cause the + service to return an HTTP status code of 404 (ResourceNotFound). + + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param delete_snapshots: Required if the blob has associated + snapshots. Specify one of the following two options: include: Delete + the base blob and all of its snapshots. only: Delete only the blob's + snapshots and not the blob itself. Possible values include: 'include', + 'only' + :type delete_snapshots: str or + ~azure.storage.blob.models.DeleteSnapshotsOptionType + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{containerName}/{blob}'} + + async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Set the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical + Namespace is enabled for the account. Sets POSIX access permissions + for the file owner, the file owning group, and others. Each class may + be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and + directories. The value is a comma-separated list of access control + entries. Each access control entry (ACE) consists of a scope, a type, + a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
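As the delete docstring above notes, a 202 does not necessarily mean the data is gone when soft delete is enabled. A hedged usage sketch; delete_snapshots="include" maps to the x-ms-delete-snapshots header serialized above:

from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

async def remove(blob: BlobClient) -> None:
    # With soft delete enabled, the service answers 202 Accepted but retains
    # the blob for the configured retention window.
    await blob.delete_blob(delete_snapshots="include")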
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "setAccessControl" + + # Construct URL + url = self.set_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + } + return cls(response, None, response_headers) + set_access_control.metadata = {'url': '/{filesystem}/{path}'} + + async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Get the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the identity values returned in + the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "getAccessControl" + + # Construct URL + url = self.get_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + 
query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), + 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), + 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), + 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + } + return cls(response, None, response_headers) + get_access_control.metadata = {'url': '/{filesystem}/{path}'} + + async def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): + """Rename a blob/file. By default, the destination is overwritten and, if + the destination already exists and has a lease, the lease is broken. + This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + To fail if the destination already exists, use a conditional request + with If-None-Match: "*". + + :param rename_source: The file or directory to be renamed. The value + must have the following format: "/{filesystem}/{path}".
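The set_access_control and get_access_control operations above are datalake-style path operations (note the DataLakeStorageErrorException and the /{filesystem}/{path} metadata). A sketch of the round trip through the filedatalake convenience layer added in this same patch, assuming a hierarchical-namespace account, a connected directory client, and that get_access_control returns a dict keyed by 'permissions' and 'acl' as in the public API:

from azure.multiapi.storagev2.filedatalake.v2020_02_10.aio import DataLakeDirectoryClient

async def tighten(directory: DataLakeDirectoryClient) -> None:
    # PATCH ?action=setAccessControl: permissions and acl are alternatives.
    await directory.set_access_control(permissions="rwxr-x---")
    # HEAD ?action=getAccessControl: upn=True asks for User Principal Names.
    acl = await directory.get_access_control(upn=True)
    print(acl["permissions"], acl["acl"])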
+         If "x-ms-properties" is specified, the properties will overwrite the
+         existing properties; otherwise, the existing properties will be
+         preserved.
+        :type rename_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param path_rename_mode: Determines the behavior of the rename
+         operation. Possible values include: 'legacy', 'posix'
+        :type path_rename_mode: str or
+         ~azure.storage.blob.models.PathRenameMode
+        :param directory_properties: Optional. User-defined properties to be
+         stored with the file or directory, in the format of a comma-separated
+         list of name and value pairs "n1=v1, n2=v2, ...", where each value is
+         base64 encoded.
+        :type directory_properties: str
+        :param posix_permissions: Optional and only valid if Hierarchical
+         Namespace is enabled for the account. Sets POSIX access permissions
+         for the file owner, the file owning group, and others. Each class may
+         be granted read, write, or execute permission. The sticky bit is also
+         supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+         0766) are supported.
+        :type posix_permissions: str
+        :param posix_umask: Only valid if Hierarchical Namespace is enabled
+         for the account. The umask restricts the permission settings for the
+         file and directory, and is applied only when a default ACL does not
+         exist on the parent directory. If a bit is set in the umask, the
+         corresponding permission is disabled; otherwise, it is taken from the
+         permissions setting. A 4-digit octal notation (e.g. 0022) is
+         supported. If no umask is specified, the default umask 0027 is used.
+        :type posix_umask: str
+        :param source_lease_id: A lease ID for the source path. If specified,
+         the source path must have an active lease and the lease ID must
+         match.
+        :type source_lease_id: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param directory_http_headers: Additional parameters for the operation + :type directory_http_headers: + ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + cache_control = None + if directory_http_headers is not None: + cache_control = directory_http_headers.cache_control + content_type = None + if directory_http_headers is not None: + content_type = directory_http_headers.content_type + content_encoding = None + if directory_http_headers is not None: + content_encoding = directory_http_headers.content_encoding + content_language = None + if directory_http_headers is not None: + content_language = directory_http_headers.content_language + content_disposition = None + if directory_http_headers is not None: + content_disposition = directory_http_headers.content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + + # Construct URL + url = self.rename.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-rename-source'] = 
self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + } + return cls(response, None, response_headers) + rename.metadata = {'url': '/{filesystem}/{path}'} + + async def undelete(self, timeout=None, request_id=None, *, cls=None, **kwargs): + """Undelete a blob that was previously soft deleted. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "undelete" + + # Construct URL + url = self.undelete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + undelete.metadata = {'url': '/{containerName}/{blob}'} + + async def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, *, cls=None, **kwargs): + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Required. Indicates mode of the expiry time. 
+         Possible values include: 'NeverExpire', 'RelativeToCreation',
+         'RelativeToNow', 'Absolute'
+        :type expiry_options: str or
+         ~azure.storage.blob.models.BlobExpiryOptions
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param expires_on: The time at which the blob is set to expire.
+        :type expires_on: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "expiry"
+
+        # Construct URL
+        url = self.set_expiry.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+        header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str')
+        if expires_on is not None:
+            header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'rfc-1123')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_expiry.metadata = {'url': '/{containerName}/{blob}'}
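The expiry headers above are normally reached through the datalake file client. A minimal sketch, assuming the vendored async client exposes set_file_expiry the way the upstream azure.storage.filedatalake client does; the import path and names are placeholders.

    import asyncio
    from datetime import datetime, timedelta, timezone

    # Assumed vendored path; mirrors azure.storage.filedatalake.aio.
    from azext.storagev2.filedatalake.v2020_02_10.aio import DataLakeFileClient

    async def main():
        async with DataLakeFileClient.from_connection_string(
                "<connection-string>", "myfilesystem", "tmp/scratch.txt") as client:
            # "Absolute" pairs with an RFC-1123 x-ms-expiry-time header; the
            # relative modes take an offset in milliseconds instead.
            await client.set_file_expiry(
                "Absolute",
                expires_on=datetime.now(timezone.utc) + timedelta(days=7))

    asyncio.run(main())

+
+    async def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """The Set HTTP Headers operation sets system properties on the blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.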
+ :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param blob_http_headers: Additional parameters for the operation + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + blob_cache_control = None + if blob_http_headers is not None: + blob_cache_control = blob_http_headers.blob_cache_control + blob_content_type = None + if blob_http_headers is not None: + blob_content_type = blob_http_headers.blob_content_type + blob_content_md5 = None + if blob_http_headers is not None: + blob_content_md5 = blob_http_headers.blob_content_md5 + blob_content_encoding = None + if blob_http_headers is not None: + blob_content_encoding = blob_http_headers.blob_content_encoding + blob_content_language = None + if blob_http_headers is not None: + blob_content_language = blob_http_headers.blob_content_language + blob_content_disposition = None + if blob_http_headers is not None: + blob_content_disposition = blob_http_headers.blob_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "properties" + + # Construct URL + url = self.set_http_headers.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') + if blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", 
blob_content_type, 'str')
+        if blob_content_md5 is not None:
+            header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+        if blob_content_encoding is not None:
+            header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+        if blob_content_language is not None:
+            header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+        if blob_content_disposition is not None:
+            header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+        if lease_id is not None:
+            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+        if if_modified_since is not None:
+            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+        if if_unmodified_since is not None:
+            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+        if if_tags is not None:
+            header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_http_headers.metadata = {'url': '/{containerName}/{blob}'}
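For orientation, a hedged sketch of driving the operation above from the public async blob client, which folds the individual x-ms-blob-content-* headers into a single ContentSettings bag; the vendored import paths and names below are placeholder assumptions.

    import asyncio

    # Assumed vendored paths; mirror azure.storage.blob and its aio client.
    from azext.storagev2.blob.v2020_02_10 import ContentSettings
    from azext.storagev2.blob.v2020_02_10.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", "mycontainer", "report.json") as blob:
            # One call replaces the whole set of system properties shown above
            # (cache control, content type/encoding/language/disposition, MD5).
            await blob.set_http_headers(content_settings=ContentSettings(
                content_type="application/json", cache_control="max-age=3600"))

    asyncio.run(main())

+
+    async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """The Set Blob Metadata operation sets user-defined metadata for the
+        specified blob as one or more name-value pairs.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair
+         associated with the blob. If no name-value pairs are specified, the
+         operation will copy the metadata from the source blob or file to the
+         destination blob.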
If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + 
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+        if encryption_key is not None:
+            header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+        if encryption_key_sha256 is not None:
+            header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+        if encryption_algorithm is not None:
+            header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+        if encryption_scope is not None:
+            header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+        if if_modified_since is not None:
+            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+        if if_unmodified_since is not None:
+            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+        if if_tags is not None:
+            header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_metadata.metadata = {'url': '/{containerName}/{blob}'}
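A short usage sketch of the operation above through the public async blob client; note that set_blob_metadata replaces whatever metadata the blob already carries. The vendored import path and names are placeholder assumptions.

    import asyncio

    # Assumed vendored path; mirrors azure.storage.blob.aio.
    from azext.storagev2.blob.v2020_02_10.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", "mycontainer", "report.json") as blob:
            # Sent as x-ms-meta-* headers with comp=metadata; the new mapping
            # replaces the blob's existing metadata rather than merging into it.
            await blob.set_blob_metadata({"project": "demo", "reviewed": "true"})

    asyncio.run(main())

+
+    async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """[Update] The Lease Blob operation establishes and manages a lock on a
+        blob for write and delete operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param duration: Specifies the duration of the lease, in seconds, or
+         negative one (-1) for a lease that never expires.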
A non-infinite lease + can be between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "lease" + action = "acquire" + + # Construct URL + url = self.acquire_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # 
Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    acquire_lease.metadata = {'url': '/{containerName}/{blob}'}
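To make the lease lifecycle concrete, a minimal sketch via the public async blob client, whose acquire_lease returns a lease client wrapping the x-ms-lease-id from the 201 response above; the vendored import path and names are placeholder assumptions.

    import asyncio

    # Assumed vendored path; mirrors azure.storage.blob.aio.
    from azext.storagev2.blob.v2020_02_10.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", "mycontainer", "report.json") as blob:
            # -1 asks for an infinite lease; finite leases are 15-60 seconds.
            lease = await blob.acquire_lease(lease_duration=15)
            try:
                # Writes against a leased blob must present the lease ID.
                await blob.set_blob_metadata({"state": "locked"}, lease=lease)
            finally:
                await lease.release()

    asyncio.run(main())

+
+    async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """[Update] The Lease Blob operation establishes and manages a lock on a
+        blob for write and delete operations.
+
+        :param lease_id: Specifies the current lease ID on the resource.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.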
+ :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "lease" + action = "release" + + # Construct URL + url = self.release_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', 
response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + release_lease.metadata = {'url': '/{containerName}/{blob}'} + + async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): + """[Update] The Lease Blob operation establishes and manages a lock on a + blob for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "lease" + action = "renew" + + # Construct URL + url = self.renew_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = 
self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + renew_lease.metadata = {'url': '/{containerName}/{blob}'} + + async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): + """[Update] The Lease Blob operation establishes and manages a lock on a + blob for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "lease" + action = "change" + + # Construct URL + url = self.change_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    change_lease.metadata = {'url': '/{containerName}/{blob}'}
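The change action above swaps the active lease ID without dropping the lock. A small hedged sketch, reusing the async lease client from the acquire example:

    import uuid

    async def rotate_lease_id(lease):
        """Swap the active lease ID for a fresh GUID without releasing it."""
        # comp=lease, x-ms-lease-action: change on the wire, as shown above.
        await lease.change(proposed_lease_id=str(uuid.uuid4()))

+
+    async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """[Update] The Lease Blob operation establishes and manages a lock on a
+        blob for write and delete operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param break_period: For a break operation, proposed duration the
+         lease should continue before it is broken, in seconds, between 0 and
+         60. This break period is only used if it is shorter than the time
+         remaining on the lease. If longer, the time remaining on the lease is
+         used. A new lease will not be available before the break period has
+         expired, but the lease may be held for longer than the break period.
+         If this header does not appear with a break operation, a
+         fixed-duration lease breaks after the remaining lease period elapses,
+         and an infinite lease breaks immediately.
+        :type break_period: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.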
+ :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "lease" + action = "break" + + # Construct URL + url = self.break_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), + 'x-ms-client-request-id': 
self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + break_lease.metadata = {'url': '/{containerName}/{blob}'} + + async def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): + """The Create Snapshot operation creates a read-only snapshot of a blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + 
if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "snapshot" + + # Construct URL + url = self.create_snapshot.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 
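+                    # x-ms-version-id below is populated only for storage accounts
+                    # that have blob versioning enabled.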
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create_snapshot.metadata = {'url': '/{containerName}/{blob}'} + + async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): + """The Start Copy From URL operation copies a blob or an internet resource + to a new blob. + + :param copy_source: Specifies the name of the source page blob + snapshot. This value is a URL of up to 2 KB in length that specifies a + page blob snapshot. The value should be URL-encoded as it would appear + in a request URI. The source blob must either be public or must be + authenticated via a shared access signature. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', + 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param rehydrate_priority: Optional: Indicates the priority with which + to rehydrate an archived blob. Possible values include: 'High', + 'Standard' + :type rehydrate_priority: str or + ~azure.storage.blob.models.RehydratePriority + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str + :param seal_blob: Overrides the sealed state of the destination blob. + Service version 2019-12-12 and newer. 
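+         Applies to append blobs only.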
+ :type seal_blob: bool + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + source_if_tags = None + if source_modified_access_conditions is not None: + source_if_tags = source_modified_access_conditions.source_if_tags + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.start_copy_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if 
blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if seal_blob is not None: + header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + if source_if_tags is not None: + header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", source_if_tags, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} + + async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, 
modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): + """The Copy From URL operation copies a blob or an internet resource to a + new blob. It will not return a response until the copy is complete. + + :param copy_source: Specifies the name of the source page blob + snapshot. This value is a URL of up to 2 KB in length that specifies a + page blob snapshot. The value should be URL-encoded as it would appear + in a request URI. The source blob must either be public or must be + authenticated via a shared access signature. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', + 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param source_content_md5: Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :type source_content_md5: bytearray + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. 
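+         The expected format is a URL query string of tag pairs, for example
+         "tag1=value1&tag2=value2".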
+ :type blob_tags_string: str + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.copy_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + 
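+        # x-ms-requires-sync is a constant "true" header: it asks the service to
+        # complete the copy synchronously, which is what distinguishes Copy From
+        # URL from the asynchronous Start Copy From URL operation above.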
header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + copy_from_url.metadata = {'url': '/{containerName}/{blob}'} + + async def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): + """The Abort Copy From URL operation aborts a pending Copy From URL + operation, and leaves a destination blob with zero length and full + metadata. 
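+         Only a copy operation that is still pending can be aborted; aborting an
+         already-completed copy fails with status code 409
+         (NoPendingCopyOperation).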
+ + :param copy_id: The copy identifier provided in the x-ms-copy-id + header of the original Copy Blob operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "copy" + + # Construct URL + url = self.abort_copy_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} + + async def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Set Tier operation sets the tier on a blob. 
The operation is + allowed on a page blob in a premium storage account and on a block blob + in a blob storage account (locally redundant storage only). A premium + page blob's tier determines the allowed size, IOPS, and bandwidth of + the blob. A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param tier: Indicates the tier to be set on the blob. Possible values + include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60', + 'P70', 'P80', 'Hot', 'Cool', 'Archive' + :type tier: str or ~azure.storage.blob.models.AccessTierRequired + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param rehydrate_priority: Optional: Indicates the priority with which + to rehydrate an archived blob. Possible values include: 'High', + 'Standard' + :type rehydrate_priority: str or + ~azure.storage.blob.models.RehydratePriority + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "tier" + + # Construct URL + url = self.set_tier.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + header_parameters['x-ms-version'] = 
self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_tier.metadata = {'url': '/{containerName}/{blob}'} + + async def get_account_info(self, *, cls=None, **kwargs): + """Returns the sku name and account kind . + + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.get_account_info.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), + 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + 
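+    # Example (sketch only, not part of the generated surface): a `cls` callback
+    # can be used to surface the deserialized response headers; `blob_op` is an
+    # assumed instance of this operations class.
+    #
+    #     headers = await blob_op.get_account_info(
+    #         cls=lambda response, deserialized, hdrs: hdrs)
+    #     sku, kind = headers['x-ms-sku-name'], headers['x-ms-account-kind']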
get_account_info.metadata = {'url': '/{containerName}/{blob}'} + + async def query(self, query_request=None, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Query operation enables users to select/project on blob data by + providing simple query expressions. + + :param query_request: the query request + :type query_request: ~azure.storage.blob.models.QueryRequest + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "query" + + # Construct URL + url = self.query.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + 
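+        # The QueryRequest body is serialized as XML, so the Accept header above
+        # and the Content-Type header below are both pinned to application/xml
+        # for this operation.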
header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + if query_request is not None: + body_content = self._serialize.body(query_request, 'QueryRequest') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + await response.load_body() + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 
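+                # The x-ms-copy-* headers below are generally only present when
+                # the blob has been the destination of a Copy Blob operation.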
'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', 
response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + query.metadata = {'url': '/{containerName}/{blob}'} + + async def get_tags(self, timeout=None, request_id=None, snapshot=None, version_id=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Get Tags operation enables users to get the tags associated with a + blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. 
+ :type version_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: BlobTags or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlobTags + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "tags" + + # Construct URL + url = self.get_tags.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('BlobTags', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_tags.metadata = {'url': '/{containerName}/{blob}'} + + async def set_tags(self, timeout=None, version_id=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, tags=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Set Tags operation enables users to set tags on a blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. 
It's for service version 2019-10-10 and newer. + :type version_id: str + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param tags: Blob tags + :type tags: ~azure.storage.blob.models.BlobTags + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "tags" + + # Construct URL + url = self.set_tags.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + if tags is not None: + body_content = self._serialize.body(tags, 'BlobTags') + else: + body_content = None + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_tags.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_block_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_block_blob_operations_async.py new file mode 100644 index 0000000..e069370 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_block_blob_operations_async.py @@ -0,0 +1,833 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class BlockBlobOperations: + """BlockBlobOperations async operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.x_ms_blob_type = "BlockBlob" + + async def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Upload Block Blob operation updates the content of an existing + block blob. Updating an existing block blob overwrites any existing + metadata on the blob. Partial updates are not supported with Put Blob; + the content of the existing blob is overwritten with the content of the + new blob. To perform a partial update of the content of a block blob, + use the Put Block List operation. + + :param body: Initial data + :type body: Generator + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. 
If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', + 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str + :param blob_http_headers: Additional parameters for the operation + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + blob_content_type = None + if blob_http_headers is not None: + blob_content_type = blob_http_headers.blob_content_type + blob_content_encoding = None + if blob_http_headers is not None: + blob_content_encoding = blob_http_headers.blob_content_encoding + blob_content_language = None + if blob_http_headers is not None: + blob_content_language = blob_http_headers.blob_content_language + blob_content_md5 = None + if blob_http_headers is not None: + blob_content_md5 = blob_http_headers.blob_content_md5 + blob_cache_control = None + if blob_http_headers is not None: + blob_cache_control = blob_http_headers.blob_cache_control + blob_content_disposition = None + if blob_http_headers is not None: + blob_content_disposition = blob_http_headers.blob_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if 
modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.upload.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') + if blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') + if blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') + if blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') + if blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') + if blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') + if blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = 
self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload.metadata = {'url': '/{containerName}/{blob}'} + + async def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, *, cls=None, **kwargs): + """The Stage Block operation creates a new block to be committed as part + of a blob. + + :param block_id: A valid Base64 string value that identifies the + block. Prior to encoding, the string must be less than or equal to 64 + bytes in size. For a given blob, the length of the value specified for + the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param body: Initial data + :type body: Generator + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. 
+ :type transactional_content_crc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + + comp = "block" + + # Construct URL + url = self.stage_block.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = 
self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + stage_block.metadata = {'url': '/{containerName}/{blob}'} + + async def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): + """The Stage Block operation creates a new block to be committed as part + of a blob where the contents are read from a URL. + + :param block_id: A valid Base64 string value that identifies the + block. Prior to encoding, the string must be less than or equal to 64 + bytes in size. For a given blob, the length of the value specified for + the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range + of bytes that must be read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + + comp = "block" + + # Construct URL + url = self.stage_block_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} + + async def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Commit Block List operation writes a blob by specifying the list of + block IDs that make up the blob. 
In order to be written as part of a + blob, a block must have been successfully written to the server in a + prior Put Block operation. You can call Put Block List to update a blob + by uploading only those blocks that have changed, then committing the + new and existing blocks together. You can do this by specifying whether + to commit a block from the committed block list or from the uncommitted + block list, or to commit the most recently uploaded version of the + block, whichever list it may belong to. + + :param blocks: + :type blocks: ~azure.storage.blob.models.BlockLookupList + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', + 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. 
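+
+        Editorial example (an illustrative sketch, not part of the generated
+        docstring): committing previously staged blocks, assuming an
+        initialized ``BlockBlobOperations`` instance ``ops`` and a list
+        ``block_ids`` holding the Base64 block IDs staged earlier
+        (hypothetical)::
+
+            # 'latest' commits the most recently uploaded version of each
+            # block, whichever list it currently belongs to.
+            block_list = models.BlockLookupList(latest=block_ids)
+            await ops.commit_block_list(block_list)
+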
+ :type blob_tags_string: str + :param blob_http_headers: Additional parameters for the operation + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + blob_cache_control = None + if blob_http_headers is not None: + blob_cache_control = blob_http_headers.blob_cache_control + blob_content_type = None + if blob_http_headers is not None: + blob_content_type = blob_http_headers.blob_content_type + blob_content_encoding = None + if blob_http_headers is not None: + blob_content_encoding = blob_http_headers.blob_content_encoding + blob_content_language = None + if blob_http_headers is not None: + blob_content_language = blob_http_headers.blob_content_language + blob_content_md5 = None + if blob_http_headers is not None: + blob_content_md5 = blob_http_headers.blob_content_md5 + blob_content_disposition = None + if blob_http_headers is not None: + blob_content_disposition = blob_http_headers.blob_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "blocklist" + + # Construct URL + url = self.commit_block_list.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; 
charset=utf-8' + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') + if blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') + if blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') + if blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') + if blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') + if blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + body_content = self._serialize.body(blocks, 'BlockLookupList') + + # Construct and send request + request = 
self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + commit_block_list.metadata = {'url': '/{containerName}/{blob}'} + + async def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Get Block List operation retrieves the list of blocks that have + been uploaded as part of a block blob. + + :param list_type: Specifies whether to return the list of committed + blocks, the list of uncommitted blocks, or both lists together. + Possible values include: 'committed', 'uncommitted', 'all' + :type list_type: str or ~azure.storage.blob.models.BlockListType + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
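+
+        Editorial example (an illustrative sketch, not part of the generated
+        docstring): retrieving both block lists, assuming an initialized
+        ``BlockBlobOperations`` instance ``ops`` (hypothetical)::
+
+            block_list = await ops.get_block_list(list_type='all')
+            # The returned BlockList keeps committed and uncommitted
+            # blocks in separate collections.
+            for block in (block_list.committed_blocks or []):
+                print(block.name, block.size)
+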
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: BlockList or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlockList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "blocklist" + + # Construct URL + url = self.get_block_list.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('BlockList', response) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, 
deserialized, header_dict) + + return deserialized + get_block_list.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_container_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_container_operations_async.py new file mode 100644 index 0000000..b7e1eb8 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_container_operations_async.py @@ -0,0 +1,1400 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class ContainerOperations: + """ContainerOperations async operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + + async def create(self, timeout=None, metadata=None, access=None, request_id=None, container_cpk_scope_info=None, *, cls=None, **kwargs): + """Creates a new container under the specified account. If the container + with the same name already exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param access: Specifies whether data in the container may be accessed + publicly and the level of access. Possible values include: + 'container', 'blob' + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
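+
+        Editorial example (an illustrative sketch, not part of the generated
+        docstring): creating a container with container-level public read
+        access, assuming an initialized ``ContainerOperations`` instance
+        ``container_ops`` (hypothetical)::
+
+            # Raises StorageErrorException if the container already exists.
+            await container_ops.create(access='container', timeout=30)
+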
+ :type request_id: str + :param container_cpk_scope_info: Additional parameters for the + operation + :type container_cpk_scope_info: + ~azure.storage.blob.models.ContainerCpkScopeInfo + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + default_encryption_scope = None + if container_cpk_scope_info is not None: + default_encryption_scope = container_cpk_scope_info.default_encryption_scope + prevent_encryption_scope_override = None + if container_cpk_scope_info is not None: + prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override + + restype = "container" + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if access is not None: + header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if default_encryption_scope is not None: + header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", default_encryption_scope, 'str') + if prevent_encryption_scope_override is not None: + header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", prevent_encryption_scope_override, 'bool') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{containerName}'} + + async def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): + """returns all user-defined metadata and system properties for 
the + specified container. The data returned does not include the container's + list of blobs. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + restype = "container" + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), + 'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')), + 'x-ms-has-legal-hold': self._deserialize('bool', 
response.headers.get('x-ms-has-legal-hold')), + 'x-ms-default-encryption-scope': self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')), + 'x-ms-deny-encryption-scope-override': self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{containerName}'} + + async def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """operation marks the specified container for deletion. The container and + any blobs contained within it are later deleted during garbage + collection. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + restype = "container" + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{containerName}'} + + async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """operation sets one or more user-defined name-value pairs for the + specified container. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
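+
+        Editorial example (an illustrative sketch, not part of the generated
+        docstring): setting container metadata, assuming an initialized
+        ``ContainerOperations`` instance ``container_ops`` (hypothetical).
+        Note that at this generated layer ``metadata`` is typed as a plain
+        string rather than a mapping::
+
+            await container_ops.set_metadata(metadata='env=dev')
+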
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + + restype = "container" + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{containerName}'} + + async def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): + """gets the permissions for the specified container. The permissions + indicate whether container data may be accessed publicly. 
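+
+        Editorial example (an illustrative sketch, not part of the generated
+        docstring): reading the stored access policies, assuming an
+        initialized ``ContainerOperations`` instance ``container_ops``
+        (hypothetical)::
+
+            signed_identifiers = await container_ops.get_access_policy()
+            for identifier in signed_identifiers:
+                # Each SignedIdentifier pairs an id with its access policy.
+                print(identifier.id, identifier.access_policy)
+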
+ + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: list or the result of cls(response) + :rtype: list[~azure.storage.blob.models.SignedIdentifier] + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + restype = "container" + comp = "acl" + + # Construct URL + url = self.get_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[SignedIdentifier]', response) + header_dict = { + 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_access_policy.metadata = {'url': '/{containerName}'} + + async def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, 
lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Sets the permissions for the specified container. The permissions
+ indicate whether blobs in a container may be accessed publicly.
+
+ :param container_acl: The ACLs for the container.
+ :type container_acl: list[~azure.storage.blob.models.SignedIdentifier]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param access: Specifies whether data in the container may be accessed
+ publicly and the level of access. Possible values include:
+ 'container', 'blob'
+ :type access: str or ~azure.storage.blob.models.PublicAccessType
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ restype = "container"
+ comp = "acl"
+
+ # Construct URL
+ url = self.set_access_policy.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ if access is not None:
+ header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct body
+ serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifiers', 'wrapped': True}}
+ if container_acl is not None:
+ body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt)
+ else:
+ body_content = None
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, body_content)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_access_policy.metadata = {'url': '/{containerName}'}
+
+ async def restore(self, timeout=None, request_id=None, deleted_container_name=None, deleted_container_version=None, *, cls=None, **kwargs):
+ """Restores a previously-deleted container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param deleted_container_name: Optional. Version 2019-12-12 and
+ later. Specifies the name of the deleted container to restore.
+ :type deleted_container_name: str
+ :param deleted_container_version: Optional. Version 2019-12-12 and
+ later. Specifies the version of the deleted container to restore.
+ :type deleted_container_version: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "container" + comp = "undelete" + + # Construct URL + url = self.restore.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if deleted_container_name is not None: + header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') + if deleted_container_version is not None: + header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + restore.metadata = {'url': '/{containerName}'} + + async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations. The lock duration can be 15 to 60 seconds, or can be + infinite. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or + negative one (-1) for a lease that never expires. A non-infinite lease + can be between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
+ :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + comp = "lease" + restype = "container" + action = "acquire" + + # Construct URL + url = self.acquire_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + acquire_lease.metadata = {'url': '/{containerName}'} + + async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations. The lock duration can be 15 to 60 seconds, or can be + infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + comp = "lease" + restype = "container" + action = "release" + + # Construct URL + url = self.release_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + 
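+ # release_lease returns no response body; when a ``cls`` callback is
+ # supplied, it receives the raw response and the deserialized headers,
+ # which are this operation's only output.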
if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + release_lease.metadata = {'url': '/{containerName}'} + + async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations. The lock duration can be 15 to 60 seconds, or can be + infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + comp = "lease" + restype = "container" + action = "renew" + + # Construct URL + url = self.renew_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", 
if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + renew_lease.metadata = {'url': '/{containerName}'} + + async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations. The lock duration can be 15 to 60 seconds, or can be + infinite. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param break_period: For a break operation, proposed duration the + lease should continue before it is broken, in seconds, between 0 and + 60. This break period is only used if it is shorter than the time + remaining on the lease. If longer, the time remaining on the lease is + used. A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break period. + If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, + and an infinite lease breaks immediately. + :type break_period: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + comp = "lease" + restype = "container" + action = "break" + + # Construct URL + url = self.break_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + break_lease.metadata = {'url': '/{containerName}'} + + async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, 
modified_access_conditions=None, *, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations. The lock duration can be 15 to 60 seconds, or can be + infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + comp = "lease" + restype = "container" + action = "change" + + # Construct URL + url = self.change_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + 
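+ # map_error raises a caller-mapped exception type when one was supplied
+ # for this status code via error_map; otherwise the generic storage
+ # error below is raised.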
raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ change_lease.metadata = {'url': '/{containerName}'}
+
+ async def list_blob_flat_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """[Update] The List Blobs operation returns a list of the blobs under the
+ specified container.
+
+ :param prefix: Filters the results to return only blobs whose
+ name begins with the specified prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list
+ of blobs to be returned with the next listing operation. The
+ operation returns the NextMarker value within the response body if the
+ listing operation did not return all blobs remaining to be listed
+ with the current page. The NextMarker value can be used as the value
+ for the marker parameter in a subsequent call to request the next page
+ of list items. The marker value is opaque to the client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of blobs to
+ return. If the request does not specify maxresults, or specifies a
+ value greater than 5000, the server will return up to 5000 items. Note
+ that if the listing operation crosses a partition boundary, then the
+ service will return a continuation token for retrieving the remainder
+ of the results. For this reason, it is possible that the service will
+ return fewer results than specified by maxresults, or than the default
+ of 5000.
+ :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets
+ to include in the response.
+ :type include: list[str or
+ ~azure.storage.blob.models.ListBlobsIncludeItem]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListBlobsFlatSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "container" + comp = "list" + + # Construct URL + url = self.list_blob_flat_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response) + header_dict = { + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_blob_flat_segment.metadata = {'url': '/{containerName}'} + + async def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): + """[Update] The List Blobs operation returns a list of the blobs under the + specified container. 
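+
+ Example (a minimal, hypothetical sketch; assumes ``container`` is an
+ instance of this operations class)::
+
+ # Group results by virtual "directory", using '/' as the delimiter.
+ segment = await container.list_blob_hierarchy_segment('/', prefix='photos/', maxresults=100)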
+
+ :param delimiter: When the request includes this parameter, the
+ operation returns a BlobPrefix element in the response body that acts
+ as a placeholder for all blobs whose names begin with the same
+ substring up to the appearance of the delimiter character. The
+ delimiter may be a single character or a string.
+ :type delimiter: str
+ :param prefix: Filters the results to return only blobs whose
+ name begins with the specified prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list
+ of blobs to be returned with the next listing operation. The
+ operation returns the NextMarker value within the response body if the
+ listing operation did not return all blobs remaining to be listed
+ with the current page. The NextMarker value can be used as the value
+ for the marker parameter in a subsequent call to request the next page
+ of list items. The marker value is opaque to the client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of blobs to
+ return. If the request does not specify maxresults, or specifies a
+ value greater than 5000, the server will return up to 5000 items. Note
+ that if the listing operation crosses a partition boundary, then the
+ service will return a continuation token for retrieving the remainder
+ of the results. For this reason, it is possible that the service will
+ return fewer results than specified by maxresults, or than the default
+ of 5000.
+ :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets
+ to include in the response.
+ :type include: list[str or
+ ~azure.storage.blob.models.ListBlobsIncludeItem]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: ListBlobsHierarchySegmentResponse or the result of
+ cls(response)
+ :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "container"
+ comp = "list"
+
+ # Construct URL
+ url = self.list_blob_hierarchy_segment.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response)
+ header_dict = {
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'}
+
+ async def get_account_info(self, *, cls=None, **kwargs):
+ """Returns the SKU name and account kind.
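+
+ Example (a hypothetical sketch; the operation returns None, so this
+ uses the ``cls`` callback to surface the deserialized response
+ headers, which carry the SKU name and account kind)::
+
+ headers = await container.get_account_info(cls=lambda resp, body, hdrs: hdrs)
+ sku_name = headers['x-ms-sku-name']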
+
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "account"
+ comp = "properties"
+
+ # Construct URL
+ url = self.get_account_info.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
+ 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_account_info.metadata = {'url': '/{containerName}'}
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py
new file mode 100644
index 0000000..590c0f8
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py
@@ -0,0 +1,739 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class DirectoryOperations:
+ """DirectoryOperations async operations.
+
+ You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar resource: Constant value: "directory".
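+
+ Example (a hypothetical sketch; assumes ``client`` is a generated
+ ``AzureBlobStorage`` client whose ``directory`` attribute is an
+ instance of this class)::
+
+ await client.directory.create(timeout=30)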
+ """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.resource = "directory" + + async def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Create a directory. By default, the destination is overwritten and if + the destination already exists and has a lease the lease is broken. + This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + To fail if the destination already exists, use a conditional request + with If-None-Match: "*". + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param directory_properties: Optional. User-defined properties to be + stored with the file or directory, in the format of a comma-separated + list of name and value pairs "n1=v1, n2=v2, ...", where each value is + base64 encoded. + :type directory_properties: str + :param posix_permissions: Optional and only valid if Hierarchical + Namespace is enabled for the account. Sets POSIX access permissions + for the file owner, the file owning group, and others. Each class may + be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + 0766) are supported. + :type posix_permissions: str + :param posix_umask: Only valid if Hierarchical Namespace is enabled + for the account. This umask restricts permission settings for file and + directory, and will only be applied when default Acl does not exist in + parent directory. If the umask bit has set, it means that the + corresponding permission will be disabled. Otherwise the corresponding + permission will be determined by the permission. A 4-digit octal + notation (e.g. 0022) is supported here. If no umask was specified, a + default umask - 0027 will be used. + :type posix_umask: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param directory_http_headers: Additional parameters for the operation + :type directory_http_headers: + ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + cache_control = None + if directory_http_headers is not None: + cache_control = directory_http_headers.cache_control + content_type = None + if directory_http_headers is not None: + content_type = directory_http_headers.content_type + content_encoding = None + if directory_http_headers is not None: + content_encoding = directory_http_headers.content_encoding + content_language = None + if directory_http_headers is not None: + content_language = directory_http_headers.content_language + content_disposition = None + if directory_http_headers is not None: + content_disposition = directory_http_headers.content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') + + # Construct headers + header_parameters = {} + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_encoding is not None: 
+            header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
+        if content_language is not None:
+            header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
+        if content_disposition is not None:
+            header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
+        if lease_id is not None:
+            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+        if if_modified_since is not None:
+            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+        if if_unmodified_since is not None:
+            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+            }
+            return cls(response, None, response_headers)
+    create.metadata = {'url': '/{filesystem}/{path}'}
+
+    async def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+        """Rename a directory. By default, the destination is overwritten; if
+        the destination already exists and has a lease, the lease is broken.
+        This operation supports conditional HTTP requests. For more
+        information, see [Specifying Conditional Headers for Blob Service
+        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+        To fail if the destination already exists, use a conditional request
+        with If-None-Match: "*".
+
+        :param rename_source: The file or directory to be renamed. The value
+         must have the following format: "/{filesystem}/{path}". If
+         "x-ms-properties" is specified, the properties will overwrite the
+         existing properties; otherwise, the existing properties will be
+         preserved.
+        :type rename_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param marker: When renaming a directory, the number of paths that are
+         renamed with each invocation is limited. If the number of paths to be
+         renamed exceeds this limit, a continuation token is returned in this
+         response header. When a continuation token is returned in the
+         response, it must be specified in a subsequent invocation of the
+         rename operation to continue renaming the directory.
+        :type marker: str
+        :param path_rename_mode: Determines the behavior of the rename
+         operation. Possible values include: 'legacy', 'posix'
+        :type path_rename_mode: str or
+         ~azure.storage.blob.models.PathRenameMode
+        :param directory_properties: Optional. User-defined properties to be
+         stored with the file or directory, in the format of a comma-separated
+         list of name and value pairs "n1=v1, n2=v2, ...", where each value is
+         base64 encoded.
+        :type directory_properties: str
+        :param posix_permissions: Optional and only valid if Hierarchical
+         Namespace is enabled for the account. Sets POSIX access permissions
+         for the file owner, the file owning group, and others. Each class may
+         be granted read, write, or execute permission. The sticky bit is also
+         supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+         0766) are supported.
+        :type posix_permissions: str
+        :param posix_umask: Only valid if Hierarchical Namespace is enabled
+         for the account. The umask restricts the permission settings for the
+         file or directory, and is applied only when a default ACL does not
+         exist on the parent directory. If a bit in the umask is set, the
+         corresponding permission is disabled; otherwise it is determined by
+         the permissions specified in posix_permissions. A 4-digit octal
+         notation (e.g. 0022) is supported. If no umask is specified, the
+         default umask 0027 is used.
+        :type posix_umask: str
+        :param source_lease_id: A lease ID for the source path. If specified,
+         the source path must have an active lease and the lease ID must
+         match.
+        :type source_lease_id: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
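+         A fresh GUID per request (for example, str(uuid.uuid4())) is a
+         typical value.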
+ :type request_id: str + :param directory_http_headers: Additional parameters for the operation + :type directory_http_headers: + ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + cache_control = None + if directory_http_headers is not None: + cache_control = directory_http_headers.cache_control + content_type = None + if directory_http_headers is not None: + content_type = directory_http_headers.content_type + content_encoding = None + if directory_http_headers is not None: + content_encoding = directory_http_headers.content_encoding + content_language = None + if directory_http_headers is not None: + content_language = directory_http_headers.content_language + content_disposition = None + if directory_http_headers is not None: + content_disposition = directory_http_headers.content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + + # Construct URL + url = self.rename.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') + + # Construct 
headers + header_parameters = {} + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 
'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+            }
+            return cls(response, None, response_headers)
+    rename.metadata = {'url': '/{filesystem}/{path}'}
+
+    async def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """Deletes the directory.
+
+        :param recursive_directory_delete: If "true", all paths beneath the
+         directory will be deleted. If "false" and the directory is non-empty,
+         an error occurs.
+        :type recursive_directory_delete: bool
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param marker: When deleting a directory, the number of paths that are
+         deleted with each invocation is limited. If the number of paths to be
+         deleted exceeds this limit, a continuation token is returned in this
+         response header. When a continuation token is returned in the
+         response, it must be specified in a subsequent invocation of the
+         delete operation to continue deleting the directory.
+        :type marker: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{filesystem}/{path}'} + + async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Set the owner, group, permissions, or access control list for a + directory. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical + Namespace is enabled for the account. Sets POSIX access permissions + for the file owner, the file owning group, and others. Each class may + be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and + directories. The value is a comma-separated list of access control + entries. Each access control entry (ACE) consists of a scope, a type, + a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "setAccessControl" + + # Construct URL + url = self.set_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + } + return cls(response, None, response_headers) + set_access_control.metadata = {'url': '/{filesystem}/{path}'} + + async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Get the owner, group, permissions, or access control list for a + directory. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the identity values returned in + the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "getAccessControl" + + # Construct URL + url = self.get_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + 
query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), + 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), + 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), + 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + } + return cls(response, None, response_headers) + get_access_control.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_page_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_page_blob_operations_async.py new file mode 100644 index 0000000..c54a27c --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_page_blob_operations_async.py @@ -0,0 +1,1399 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class PageBlobOperations: + """PageBlobOperations async operations. 
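+    Each method maps one-to-one onto a Page Blob REST operation; response
+    headers are surfaced through the optional ``cls`` callback.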
+
+    You should not instantiate this class directly; create a Client
+    instance, which will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.x_ms_blob_type = "PageBlob"
+
+    async def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """The Create operation creates a new page blob.
+
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param blob_content_length: This header specifies the maximum size for
+         the page blob, up to 1 TB. The page blob size must be aligned to a
+         512-byte boundary.
+        :type blob_content_length: long
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param tier: Optional. Indicates the tier to be set on the page blob.
+         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+         'P40', 'P50', 'P60', 'P70', 'P80'
+        :type tier: str or
+         ~azure.storage.blob.models.PremiumPageBlobAccessTier
+        :param metadata: Optional. Specifies a user-defined name-value pair
+         associated with the blob. If no name-value pairs are specified, the
+         operation will copy the metadata from the source blob or file to the
+         destination blob. If one or more name-value pairs are specified, the
+         destination blob is created with the specified metadata, and metadata
+         is not copied from the source blob or file. Note that beginning with
+         version 2009-09-19, metadata names must adhere to the naming rules
+         for C# identifiers. See Naming and Referencing Containers, Blobs, and
+         Metadata for more information.
+        :type metadata: str
+        :param blob_sequence_number: Set for page blobs only. The sequence
+         number is a user-controlled value that you can use to track requests.
+         The value of the sequence number must be between 0 and 2^63 - 1.
+        :type blob_sequence_number: long
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param blob_tags_string: Optional. Used to set blob tags in various
+         blob operations.
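+         The value is expected to be a URL query string of tag pairs, for
+         example "tag1=value1&tag2=value2".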
+ :type blob_tags_string: str + :param blob_http_headers: Additional parameters for the operation + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + blob_content_type = None + if blob_http_headers is not None: + blob_content_type = blob_http_headers.blob_content_type + blob_content_encoding = None + if blob_http_headers is not None: + blob_content_encoding = blob_http_headers.blob_content_encoding + blob_content_language = None + if blob_http_headers is not None: + blob_content_language = blob_http_headers.blob_content_language + blob_content_md5 = None + if blob_http_headers is not None: + blob_content_md5 = blob_http_headers.blob_content_md5 + blob_cache_control = None + if blob_http_headers is not None: + blob_cache_control = blob_http_headers.blob_cache_control + blob_content_disposition = None + if blob_http_headers is not None: + blob_content_disposition = blob_http_headers.blob_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if tier is not None: + header_parameters['x-ms-access-tier'] 
= self._serialize.header("tier", tier, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') + if blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') + if blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') + if blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') + if blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') + if blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') + if blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, 
stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    create.metadata = {'url': '/{containerName}/{blob}'}
+
+    async def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """The Upload Pages operation writes a range of pages to a page blob.
+
+        :param body: Initial data
+        :type body: Generator
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param transactional_content_md5: Specify the transactional md5 for
+         the body, to be validated by the service.
+        :type transactional_content_md5: bytearray
+        :param transactional_content_crc64: Specify the transactional crc64
+         for the body, to be validated by the service.
+        :type transactional_content_crc64: bytearray
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param range: The range of bytes to be written as a page. The range
+         must be aligned to 512-byte boundaries.
+        :type range: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Additional parameters for + the operation + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_sequence_number_less_than_or_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + if_sequence_number_less_than = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + if_sequence_number_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "page" + page_write = "update" + + # Construct URL + url = self.upload_pages.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if 
transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') + if if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') + if if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': 
self._deserialize('bytearray', response.headers.get('Content-MD5')),
+                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+                'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+                'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    upload_pages.metadata = {'url': '/{containerName}/{blob}'}
+
+    async def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+        """The Clear Pages operation clears a set of pages from a page blob.
+
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param range: The range of bytes to be cleared. The range must be
+         aligned to 512-byte boundaries.
+        :type range: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Additional parameters for + the operation + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_sequence_number_less_than_or_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + if_sequence_number_less_than = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + if_sequence_number_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "page" + page_write = "clear" + + # Construct URL + url = self.clear_pages.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if range is not None: + header_parameters['x-ms-range'] = 
self._serialize.header("range", range, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') + if if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') + if if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    clear_pages.metadata = {'url': '/{containerName}/{blob}'}
+
+    async def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+        """The Upload Pages operation writes a range of pages to a page blob where
+        the contents are read from a URL.
+
+        :param source_url: Specify a URL to the copy source.
+        :type source_url: str
+        :param source_range: Bytes of source data in the specified range. The
+         length of this range should match the Content-Length header and the
+         x-ms-range/Range destination range header.
+        :type source_range: str
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param range: The range of bytes to which the source range would be
+         written. The range must be aligned to 512-byte boundaries, and
+         range-end is required.
+        :type range: str
+        :param source_content_md5: Specify the md5 calculated for the range of
+         bytes that must be read from the copy source.
+        :type source_content_md5: bytearray
+        :param source_contentcrc64: Specify the crc64 calculated for the range
+         of bytes that must be read from the copy source.
+        :type source_contentcrc64: bytearray
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param sequence_number_access_conditions: Additional parameters for + the operation + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_sequence_number_less_than_or_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + if_sequence_number_less_than = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + if_sequence_number_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is 
not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + + comp = "page" + page_write = "update" + + # Construct URL + url = self.upload_pages_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') + if if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') + if if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if 
if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} + + async def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Get Page Ranges operation returns the list of valid page ranges for + a page blob or snapshot of a page blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param range: Return only the bytes of the blob in the specified + range. 
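A hedged sketch of calling the upload_pages_from_url operation above through the public client, assuming the vendored aio BlobClient mirrors the azure-storage-blob v12 surface; account, container, and SAS values are placeholders.

import asyncio
from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

async def main():
    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="copy.vhd")
    async with dest:
        # The source must be readable anonymously or via the SAS token in
        # the URL; offsets and lengths are 512-byte aligned.
        await dest.upload_pages_from_url(
            source_url="https://<account>.blob.core.windows.net/pages/disk.vhd?<sas>",
            offset=0,         # destination offset
            length=512,       # bytes to copy
            source_offset=0)  # offset within the source blob

asyncio.run(main())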
+ :type range: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: PageList or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "pagelist" + + # Construct URL + url = self.get_page_ranges.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PageList', response) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} + + async def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, prev_snapshot_url=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Get Page Ranges Diff operation returns the list of valid page + ranges for a page blob that were changed between target blob and + previous snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param prevsnapshot: Optional in version 2015-07-08 and newer. The + prevsnapshot parameter is a DateTime value that specifies that the + response will contain only pages that were changed between target blob + and previous snapshot. Changed pages include both updated and cleared + pages. The target blob may be a snapshot, as long as the snapshot + specified by prevsnapshot is the older of the two. Note that + incremental snapshots are currently supported only for blobs created + on or after January 1, 2016. + :type prevsnapshot: str + :param prev_snapshot_url: Optional. This header is only supported in + service versions 2019-04-19 and after and specifies the URL of a + previous snapshot of the target blob. The response will only contain + pages that were changed between the target blob and its previous + snapshot. + :type prev_snapshot_url: str + :param range: Return only the bytes of the blob in the specified + range. + :type range: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
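A short sketch of the get_page_ranges operation that completes above, via the public wrapper (assuming the vendored aio BlobClient; names are placeholders).

import asyncio
from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

async def main():
    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="disk.vhd")
    async with blob:
        # Returns a (valid_ranges, cleared_ranges) pair; each entry is a
        # dict with 'start' and 'end' byte offsets.
        valid, cleared = await blob.get_page_ranges()
        for r in valid:
            print("valid range:", r["start"], "-", r["end"])

asyncio.run(main())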
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: PageList or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "pagelist" + + # Construct URL + url = self.get_page_ranges_diff.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if prevsnapshot is not None: + query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + if prev_snapshot_url is not None: + header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and 
send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PageList', response) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} + + async def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Resize the Blob. + + :param blob_content_length: This header specifies the maximum size for + the page blob, up to 1 TB. The page blob size must be aligned to a + 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
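The diff variant that completes above is reached through the same public wrapper by passing a baseline snapshot. A sketch, assuming the vendored aio BlobClient and placeholder names:

import asyncio
from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

async def main():
    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="disk.vhd")
    async with blob:
        snapshot = await blob.create_snapshot()          # baseline
        await blob.upload_page(b"\x01" * 512, offset=0, length=512)
        # Only ranges changed since the baseline snapshot are returned.
        changed, cleared = await blob.get_page_ranges(
            previous_snapshot_diff=snapshot)
        print(changed)

asyncio.run(main())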
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "properties" + + # Construct URL + url = self.resize.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') 
+ if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + resize.metadata = {'url': '/{containerName}/{blob}'} + + async def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Update the sequence number of the blob. + + :param sequence_number_action: Required if the + x-ms-blob-sequence-number header is set for the request. This property + applies to page blobs only. This property indicates how the service + should modify the blob's sequence number. Possible values include: + 'max', 'update', 'increment' + :type sequence_number_action: str or + ~azure.storage.blob.models.SequenceNumberActionType + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param blob_sequence_number: Set for page blobs only. The sequence + number is a user-controlled value that you can use to track requests. + The value of the sequence number must be between 0 and 2^63 - 1. + :type blob_sequence_number: long + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
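A minimal sketch of the resize operation that completes above (assuming the vendored aio BlobClient exposes resize_blob as in azure-storage-blob v12; names are placeholders).

import asyncio
from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

async def main():
    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="disk.vhd")
    async with blob:
        # The new size must be 512-byte aligned; pages beyond it are lost.
        await blob.resize_blob(size=4 * 1024 * 1024)

asyncio.run(main())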
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "properties" + + # Construct URL + url = self.update_sequence_number.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} + + async def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The Copy Incremental operation copies a snapshot of the source page + blob to a destination page blob. The snapshot is copied such that only + the differential changes since the previously copied snapshot are + transferred to the destination. The copied snapshots are complete + copies of the original snapshot and can be read or copied from as + usual. This API is supported since REST version 2016-05-31. + + :param copy_source: Specifies the name of the source page blob + snapshot. This value is a URL of up to 2 KB in length that specifies a + page blob snapshot. The value should be URL-encoded as it would appear + in a request URI. The source blob must either be public or must be + authenticated via a shared access signature. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
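A sketch of the update_sequence_number operation that completes above, using the public set_sequence_number wrapper (an assumption about the vendored aio BlobClient; names are placeholders). The action strings map to the SequenceNumberActionType values in the generated models.

import asyncio
from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

async def main():
    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="disk.vhd")
    async with blob:
        # "update" pins an explicit value; "max" keeps the larger of the
        # two; "increment" takes no value and bumps the number by one.
        await blob.set_sequence_number("update", 7)
        await blob.set_sequence_number("increment")

asyncio.run(main())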
+ :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "incrementalcopy" + + # Construct URL + url = self.copy_incremental.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + copy_incremental.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py new file mode 100644 index 0000000..e12c2b9 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py @@ -0,0 +1,664 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class ServiceOperations: + """ServiceOperations async operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + + async def set_properties(self, storage_service_properties, timeout=None, request_id=None, *, cls=None, **kwargs): + """Sets properties for a storage account's Blob service endpoint, + including properties for Storage Analytics and CORS (Cross-Origin + Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. + :type storage_service_properties: + ~azure.storage.blob.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
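A sketch of driving the copy_incremental operation that completes above via the public client, assuming the vendored aio BlobClient accepts the incremental_copy flag on start_copy_from_url as in azure-storage-blob v12; URL components are placeholders.

import asyncio
from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

async def main():
    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="backups", blob_name="disk-incr")
    async with dest:
        # The source must be a page-blob snapshot URL, public or SAS-authorized.
        src = "https://<account>.blob.core.windows.net/pages/disk.vhd?snapshot=<time>&<sas>"
        copy = await dest.start_copy_from_url(src, incremental_copy=True)
        print(copy["copy_status"])  # e.g. "pending"

asyncio.run(main())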
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "service" + comp = "properties" + + # Construct URL + url = self.set_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct body + body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_properties.metadata = {'url': '/'} + + async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwargs): + """Gets the properties of a storage account's Blob service, including + properties for Storage Analytics and CORS (Cross-Origin Resource + Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
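A sketch of the set_properties operation that completes above, through the public service client. The BlobAnalyticsLogging and RetentionPolicy model names are assumptions carried over from azure-storage-blob v12; the connection string is a placeholder.

import asyncio
from azure.multiapi.storagev2.blob.v2020_02_10 import (
    BlobAnalyticsLogging, RetentionPolicy)
from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient

async def main():
    svc = BlobServiceClient.from_connection_string("<connection-string>")
    async with svc:
        # Enable read/write/delete logging with a 7-day retention window.
        await svc.set_service_properties(
            analytics_logging=BlobAnalyticsLogging(
                read=True, write=True, delete=True,
                retention_policy=RetentionPolicy(enabled=True, days=7)))

asyncio.run(main())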
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: StorageServiceProperties or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceProperties + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "service" + comp = "properties" + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('StorageServiceProperties', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_properties.metadata = {'url': '/'} + + async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwargs): + """Retrieves statistics related to replication for the Blob service. It is + only available on the secondary location endpoint when read-access + geo-redundant replication is enabled for the storage account. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
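The matching read path for the get_properties operation that completes above, sketched under the same assumptions:

import asyncio
from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient

async def main():
    svc = BlobServiceClient.from_connection_string("<connection-string>")
    async with svc:
        # The result is a dict keyed by feature, e.g. analytics_logging,
        # hour_metrics, minute_metrics, cors, delete_retention_policy.
        props = await svc.get_service_properties()
        print(props["cors"])

asyncio.run(main())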
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: StorageServiceStats or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceStats + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "service" + comp = "stats" + + # Construct URL + url = self.get_statistics.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('StorageServiceStats', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_statistics.metadata = {'url': '/'} + + async def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): + """The List Containers Segment operation returns a list of the containers + under the specified account. + + :param prefix: Filters the results to return only containers whose + name begins with the specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list + of containers to be returned with the next listing operation. The + operation returns the NextMarker value within the response body if the + listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value + for the marker parameter in a subsequent call to request the next page + of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to + return. If the request does not specify maxresults, or specifies a + value greater than 5000, the server will return up to 5000 items. 
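A sketch of the get_statistics operation that completes above. As the docstring notes, it is answered only by the secondary endpoint; the account URL and key are placeholders.

import asyncio
from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient

async def main():
    # Statistics are served only by the secondary endpoint of an RA-GRS
    # account, hence the "-secondary" host suffix.
    svc = BlobServiceClient(
        "https://<account>-secondary.blob.core.windows.net",
        credential="<account-key>")
    async with svc:
        stats = await svc.get_service_stats()
        print(stats["geo_replication"]["status"])

asyncio.run(main())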
Note + that if the listing operation crosses a partition boundary, then the + service will return a continuation token for retrieving the remainder + of the results. For this reason, it is possible that the service will + return fewer results than specified by maxresults, or than the default + of 5000. + :type maxresults: int + :param include: Include this parameter to specify that the container's + metadata be returned as part of the response body. + :type include: list[str or + ~azure.storage.blob.models.ListContainersIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListContainersSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "list" + + # Construct URL + url = self.list_containers_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[ListContainersIncludeType]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListContainersSegmentResponse', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return 
cls(response, deserialized, header_dict) + + return deserialized + list_containers_segment.metadata = {'url': '/'} + + async def get_user_delegation_key(self, key_info, timeout=None, request_id=None, *, cls=None, **kwargs): + """Retrieves a user delegation key for the Blob service. This is only a + valid operation when using bearer token authentication. + + :param key_info: + :type key_info: ~azure.storage.blob.models.KeyInfo + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: UserDelegationKey or the result of cls(response) + :rtype: ~azure.storage.blob.models.UserDelegationKey + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "service" + comp = "userdelegationkey" + + # Construct URL + url = self.get_user_delegation_key.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct body + body_content = self._serialize.body(key_info, 'KeyInfo') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('UserDelegationKey', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_user_delegation_key.metadata = {'url': '/'} + + async def get_account_info(self, *, cls=None, **kwargs): + """Returns the SKU name and account kind.
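A sketch of consuming the list_containers_segment operation above through the public pager, which handles the marker/NextMarker continuation described in its docstring (assuming the vendored aio BlobServiceClient; names are placeholders).

import asyncio
from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient

async def main():
    svc = BlobServiceClient.from_connection_string("<connection-string>")
    async with svc:
        # The async pager follows NextMarker continuation tokens for you;
        # results_per_page corresponds to maxresults on the wire.
        async for container in svc.list_containers(
                name_starts_with="logs-", include_metadata=True,
                results_per_page=100):
            print(container.name, container.metadata)

asyncio.run(main())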
+ + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "account" + comp = "properties" + + # Construct URL + url = self.get_account_info.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), + 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_account_info.metadata = {'url': '/'} + + async def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, *, cls=None, **kwargs): + """The Batch operation allows multiple API calls to be embedded into a + single HTTP request. + + :param body: Initial data + :type body: Generator + :param content_length: The length of the request. + :type content_length: long + :param multipart_content_type: Required. The value of this header must + be multipart/mixed with a batch boundary. Example header value: + multipart/mixed; boundary=batch_ + :type multipart_content_type: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
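Annotation: get_account_info above deserializes no body, so the SKU and account-kind values travel only in response headers and are reachable solely through the cls callback. A hedged sketch, again assuming a ServiceOperations instance named service_ops:

    async def fetch_account_info(service_ops):
        # cls receives (raw response, deserialized body, parsed headers);
        # the deserialized body is always None for this operation.
        def pick(response, deserialized, headers):
            return headers['x-ms-sku-name'], headers['x-ms-account-kind']
        return await service_ops.get_account_info(cls=pick)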
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: Generator or the result of cls(response)
+ :rtype: Generator
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "batch"
+
+ # Construct URL
+ url = self.submit_batch.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ # Content-Type must carry the caller-supplied multipart boundary, so it
+ # is set from multipart_content_type rather than a fixed XML value.
+ header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request; the body generator streams through as-is.
+ request = self._client.post(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ await response.load_body()
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ submit_batch.metadata = {'url': '/'}
+
+ async def filter_blobs(self, timeout=None, request_id=None, where=None, marker=None, maxresults=None, *, cls=None, **kwargs):
+ """The Filter Blobs operation enables callers to list blobs across all
+ containers whose tags match a given search expression. Filter blobs
+ searches across all containers within a storage account but can be
+ scoped within the expression to a single container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param where: Filters the results to return only blobs
+ whose tags match the specified expression.
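Annotation: the where parameter documented just below takes a tag filter expression; scoping to one container uses the @container pseudo-tag inside the expression itself. A minimal sketch, with service_ops assumed as before and the tag name chosen purely for illustration; FilterBlobSegment exposes the matches as a blobs list:

    async def blobs_tagged_alpha(service_ops, container=None):
        # Tag values are single-quoted inside the expression.
        expr = "\"project\" = 'alpha'"
        if container is not None:
            expr = "@container = '{}' AND {}".format(container, expr)
        segment = await service_ops.filter_blobs(where=expr)
        return [item.name for item in segment.blobs]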
+ :type where: str
+ :param marker: A string value that identifies the portion of the list
+ of blobs to be returned with the next listing operation. The
+ operation returns the NextMarker value within the response body if the
+ listing operation did not return all blobs remaining to be listed
+ with the current page. The NextMarker value can be used as the value
+ for the marker parameter in a subsequent call to request the next page
+ of list items. The marker value is opaque to the client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of blobs to
+ return. If the request does not specify maxresults, or specifies a
+ value greater than 5000, the server will return up to 5000 items. Note
+ that if the listing operation crosses a partition boundary, then the
+ service will return a continuation token for retrieving the remainder
+ of the results. For this reason, it is possible that the service will
+ return fewer results than specified by maxresults, or than the default
+ of 5000.
+ :type maxresults: int
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: FilterBlobSegment or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.FilterBlobSegment
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "blobs"
+
+ # Construct URL
+ url = self.filter_blobs.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if where is not None:
+ query_parameters['where'] = self._serialize.query("where", where, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('FilterBlobSegment', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str',
response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + filter_blobs.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/__init__.py new file mode 100644 index 0000000..3a6f8ed --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/__init__.py @@ -0,0 +1,229 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AccessPolicy + from ._models_py3 import AppendPositionAccessConditions + from ._models_py3 import ArrowConfiguration + from ._models_py3 import ArrowField + from ._models_py3 import BlobFlatListSegment + from ._models_py3 import BlobHierarchyListSegment + from ._models_py3 import BlobHTTPHeaders + from ._models_py3 import BlobItemInternal + from ._models_py3 import BlobMetadata + from ._models_py3 import BlobPrefix + from ._models_py3 import BlobPropertiesInternal + from ._models_py3 import BlobTag + from ._models_py3 import BlobTags + from ._models_py3 import Block + from ._models_py3 import BlockList + from ._models_py3 import BlockLookupList + from ._models_py3 import ClearRange + from ._models_py3 import ContainerCpkScopeInfo + from ._models_py3 import ContainerItem + from ._models_py3 import ContainerProperties + from ._models_py3 import CorsRule + from ._models_py3 import CpkInfo + from ._models_py3 import CpkScopeInfo + from ._models_py3 import DataLakeStorageError, DataLakeStorageErrorException + from ._models_py3 import DataLakeStorageErrorError + from ._models_py3 import DelimitedTextConfiguration + from ._models_py3 import DirectoryHttpHeaders + from ._models_py3 import FilterBlobItem + from ._models_py3 import FilterBlobSegment + from ._models_py3 import GeoReplication + from ._models_py3 import JsonTextConfiguration + from ._models_py3 import KeyInfo + from ._models_py3 import LeaseAccessConditions + from ._models_py3 import ListBlobsFlatSegmentResponse + from ._models_py3 import ListBlobsHierarchySegmentResponse + from ._models_py3 import ListContainersSegmentResponse + from ._models_py3 import Logging + from ._models_py3 import Metrics + from ._models_py3 import ModifiedAccessConditions + from ._models_py3 import PageList + from ._models_py3 import PageRange + from ._models_py3 import QueryFormat + from ._models_py3 import QueryRequest + from ._models_py3 import QuerySerialization + from ._models_py3 import RetentionPolicy + from ._models_py3 import SequenceNumberAccessConditions + from ._models_py3 import SignedIdentifier + from ._models_py3 import SourceModifiedAccessConditions + from ._models_py3 import StaticWebsite + from ._models_py3 import StorageError, StorageErrorException + from ._models_py3 import StorageServiceProperties + from ._models_py3 import StorageServiceStats + from ._models_py3 import UserDelegationKey +except (SyntaxError, ImportError): + from ._models import AccessPolicy + from ._models import AppendPositionAccessConditions + from ._models import ArrowConfiguration + 
from ._models import ArrowField + from ._models import BlobFlatListSegment + from ._models import BlobHierarchyListSegment + from ._models import BlobHTTPHeaders + from ._models import BlobItemInternal + from ._models import BlobMetadata + from ._models import BlobPrefix + from ._models import BlobPropertiesInternal + from ._models import BlobTag + from ._models import BlobTags + from ._models import Block + from ._models import BlockList + from ._models import BlockLookupList + from ._models import ClearRange + from ._models import ContainerCpkScopeInfo + from ._models import ContainerItem + from ._models import ContainerProperties + from ._models import CorsRule + from ._models import CpkInfo + from ._models import CpkScopeInfo + from ._models import DataLakeStorageError, DataLakeStorageErrorException + from ._models import DataLakeStorageErrorError + from ._models import DelimitedTextConfiguration + from ._models import DirectoryHttpHeaders + from ._models import FilterBlobItem + from ._models import FilterBlobSegment + from ._models import GeoReplication + from ._models import JsonTextConfiguration + from ._models import KeyInfo + from ._models import LeaseAccessConditions + from ._models import ListBlobsFlatSegmentResponse + from ._models import ListBlobsHierarchySegmentResponse + from ._models import ListContainersSegmentResponse + from ._models import Logging + from ._models import Metrics + from ._models import ModifiedAccessConditions + from ._models import PageList + from ._models import PageRange + from ._models import QueryFormat + from ._models import QueryRequest + from ._models import QuerySerialization + from ._models import RetentionPolicy + from ._models import SequenceNumberAccessConditions + from ._models import SignedIdentifier + from ._models import SourceModifiedAccessConditions + from ._models import StaticWebsite + from ._models import StorageError, StorageErrorException + from ._models import StorageServiceProperties + from ._models import StorageServiceStats + from ._models import UserDelegationKey +from ._azure_blob_storage_enums import ( + AccessTier, + AccessTierOptional, + AccessTierRequired, + AccountKind, + ArchiveStatus, + BlobExpiryOptions, + BlobType, + BlockListType, + CopyStatusType, + DeleteSnapshotsOptionType, + EncryptionAlgorithmType, + GeoReplicationStatusType, + LeaseDurationType, + LeaseStateType, + LeaseStatusType, + ListBlobsIncludeItem, + ListContainersIncludeType, + PathRenameMode, + PremiumPageBlobAccessTier, + PublicAccessType, + QueryFormatType, + RehydratePriority, + SequenceNumberActionType, + SkuName, + StorageErrorCode, + SyncCopyStatusType, +) + +__all__ = [ + 'AccessPolicy', + 'AppendPositionAccessConditions', + 'ArrowConfiguration', + 'ArrowField', + 'BlobFlatListSegment', + 'BlobHierarchyListSegment', + 'BlobHTTPHeaders', + 'BlobItemInternal', + 'BlobMetadata', + 'BlobPrefix', + 'BlobPropertiesInternal', + 'BlobTag', + 'BlobTags', + 'Block', + 'BlockList', + 'BlockLookupList', + 'ClearRange', + 'ContainerCpkScopeInfo', + 'ContainerItem', + 'ContainerProperties', + 'CorsRule', + 'CpkInfo', + 'CpkScopeInfo', + 'DataLakeStorageError', 'DataLakeStorageErrorException', + 'DataLakeStorageErrorError', + 'DelimitedTextConfiguration', + 'DirectoryHttpHeaders', + 'FilterBlobItem', + 'FilterBlobSegment', + 'GeoReplication', + 'JsonTextConfiguration', + 'KeyInfo', + 'LeaseAccessConditions', + 'ListBlobsFlatSegmentResponse', + 'ListBlobsHierarchySegmentResponse', + 'ListContainersSegmentResponse', + 'Logging', + 'Metrics', + 
'ModifiedAccessConditions', + 'PageList', + 'PageRange', + 'QueryFormat', + 'QueryRequest', + 'QuerySerialization', + 'RetentionPolicy', + 'SequenceNumberAccessConditions', + 'SignedIdentifier', + 'SourceModifiedAccessConditions', + 'StaticWebsite', + 'StorageError', 'StorageErrorException', + 'StorageServiceProperties', + 'StorageServiceStats', + 'UserDelegationKey', + 'PublicAccessType', + 'CopyStatusType', + 'LeaseDurationType', + 'LeaseStateType', + 'LeaseStatusType', + 'AccessTier', + 'ArchiveStatus', + 'BlobType', + 'RehydratePriority', + 'StorageErrorCode', + 'GeoReplicationStatusType', + 'QueryFormatType', + 'AccessTierRequired', + 'AccessTierOptional', + 'PremiumPageBlobAccessTier', + 'BlobExpiryOptions', + 'BlockListType', + 'DeleteSnapshotsOptionType', + 'EncryptionAlgorithmType', + 'ListBlobsIncludeItem', + 'ListContainersIncludeType', + 'PathRenameMode', + 'SequenceNumberActionType', + 'SkuName', + 'AccountKind', + 'SyncCopyStatusType', +] diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_azure_blob_storage_enums.py new file mode 100644 index 0000000..e45eea3 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_azure_blob_storage_enums.py @@ -0,0 +1,343 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum + + +class PublicAccessType(str, Enum): + + container = "container" + blob = "blob" + + +class CopyStatusType(str, Enum): + + pending = "pending" + success = "success" + aborted = "aborted" + failed = "failed" + + +class LeaseDurationType(str, Enum): + + infinite = "infinite" + fixed = "fixed" + + +class LeaseStateType(str, Enum): + + available = "available" + leased = "leased" + expired = "expired" + breaking = "breaking" + broken = "broken" + + +class LeaseStatusType(str, Enum): + + locked = "locked" + unlocked = "unlocked" + + +class AccessTier(str, Enum): + + p4 = "P4" + p6 = "P6" + p10 = "P10" + p15 = "P15" + p20 = "P20" + p30 = "P30" + p40 = "P40" + p50 = "P50" + p60 = "P60" + p70 = "P70" + p80 = "P80" + hot = "Hot" + cool = "Cool" + archive = "Archive" + + +class ArchiveStatus(str, Enum): + + rehydrate_pending_to_hot = "rehydrate-pending-to-hot" + rehydrate_pending_to_cool = "rehydrate-pending-to-cool" + + +class BlobType(str, Enum): + + block_blob = "BlockBlob" + page_blob = "PageBlob" + append_blob = "AppendBlob" + + +class RehydratePriority(str, Enum): + + high = "High" + standard = "Standard" + + +class StorageErrorCode(str, Enum): + + account_already_exists = "AccountAlreadyExists" + account_being_created = "AccountBeingCreated" + account_is_disabled = "AccountIsDisabled" + authentication_failed = "AuthenticationFailed" + authorization_failure = "AuthorizationFailure" + condition_headers_not_supported = "ConditionHeadersNotSupported" + condition_not_met = "ConditionNotMet" + empty_metadata_key = "EmptyMetadataKey" + insufficient_account_permissions = "InsufficientAccountPermissions" + internal_error = "InternalError" + invalid_authentication_info = 
"InvalidAuthenticationInfo" + invalid_header_value = "InvalidHeaderValue" + invalid_http_verb = "InvalidHttpVerb" + invalid_input = "InvalidInput" + invalid_md5 = "InvalidMd5" + invalid_metadata = "InvalidMetadata" + invalid_query_parameter_value = "InvalidQueryParameterValue" + invalid_range = "InvalidRange" + invalid_resource_name = "InvalidResourceName" + invalid_uri = "InvalidUri" + invalid_xml_document = "InvalidXmlDocument" + invalid_xml_node_value = "InvalidXmlNodeValue" + md5_mismatch = "Md5Mismatch" + metadata_too_large = "MetadataTooLarge" + missing_content_length_header = "MissingContentLengthHeader" + missing_required_query_parameter = "MissingRequiredQueryParameter" + missing_required_header = "MissingRequiredHeader" + missing_required_xml_node = "MissingRequiredXmlNode" + multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" + operation_timed_out = "OperationTimedOut" + out_of_range_input = "OutOfRangeInput" + out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" + request_body_too_large = "RequestBodyTooLarge" + resource_type_mismatch = "ResourceTypeMismatch" + request_url_failed_to_parse = "RequestUrlFailedToParse" + resource_already_exists = "ResourceAlreadyExists" + resource_not_found = "ResourceNotFound" + server_busy = "ServerBusy" + unsupported_header = "UnsupportedHeader" + unsupported_xml_node = "UnsupportedXmlNode" + unsupported_query_parameter = "UnsupportedQueryParameter" + unsupported_http_verb = "UnsupportedHttpVerb" + append_position_condition_not_met = "AppendPositionConditionNotMet" + blob_already_exists = "BlobAlreadyExists" + blob_not_found = "BlobNotFound" + blob_overwritten = "BlobOverwritten" + blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" + block_count_exceeds_limit = "BlockCountExceedsLimit" + block_list_too_long = "BlockListTooLong" + cannot_change_to_lower_tier = "CannotChangeToLowerTier" + cannot_verify_copy_source = "CannotVerifyCopySource" + container_already_exists = "ContainerAlreadyExists" + container_being_deleted = "ContainerBeingDeleted" + container_disabled = "ContainerDisabled" + container_not_found = "ContainerNotFound" + content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" + copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" + copy_id_mismatch = "CopyIdMismatch" + feature_version_mismatch = "FeatureVersionMismatch" + incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" + incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" + infinite_lease_duration_required = "InfiniteLeaseDurationRequired" + invalid_blob_or_block = "InvalidBlobOrBlock" + invalid_blob_tier = "InvalidBlobTier" + invalid_blob_type = "InvalidBlobType" + invalid_block_id = "InvalidBlockId" + invalid_block_list = "InvalidBlockList" + invalid_operation = "InvalidOperation" + invalid_page_range = "InvalidPageRange" + invalid_source_blob_type = "InvalidSourceBlobType" + invalid_source_blob_url = "InvalidSourceBlobUrl" + invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" + lease_already_present = "LeaseAlreadyPresent" + lease_already_broken = "LeaseAlreadyBroken" + lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" + lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" + lease_id_mismatch_with_lease_operation = 
"LeaseIdMismatchWithLeaseOperation" + lease_id_missing = "LeaseIdMissing" + lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" + lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" + lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" + lease_lost = "LeaseLost" + lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" + lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" + lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" + max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" + no_authentication_information = "NoAuthenticationInformation" + no_pending_copy_operation = "NoPendingCopyOperation" + operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" + pending_copy_operation = "PendingCopyOperation" + previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" + previous_snapshot_not_found = "PreviousSnapshotNotFound" + previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" + sequence_number_condition_not_met = "SequenceNumberConditionNotMet" + sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" + snapshot_count_exceeded = "SnapshotCountExceeded" + snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" + snapshots_present = "SnapshotsPresent" + source_condition_not_met = "SourceConditionNotMet" + system_in_use = "SystemInUse" + target_condition_not_met = "TargetConditionNotMet" + unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" + blob_being_rehydrated = "BlobBeingRehydrated" + blob_archived = "BlobArchived" + blob_not_archived = "BlobNotArchived" + authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch" + authorization_protocol_mismatch = "AuthorizationProtocolMismatch" + authorization_permission_mismatch = "AuthorizationPermissionMismatch" + authorization_service_mismatch = "AuthorizationServiceMismatch" + authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch" + + +class GeoReplicationStatusType(str, Enum): + + live = "live" + bootstrap = "bootstrap" + unavailable = "unavailable" + + +class QueryFormatType(str, Enum): + + delimited = "delimited" + json = "json" + arrow = "arrow" + + +class AccessTierRequired(str, Enum): + + p4 = "P4" + p6 = "P6" + p10 = "P10" + p15 = "P15" + p20 = "P20" + p30 = "P30" + p40 = "P40" + p50 = "P50" + p60 = "P60" + p70 = "P70" + p80 = "P80" + hot = "Hot" + cool = "Cool" + archive = "Archive" + + +class AccessTierOptional(str, Enum): + + p4 = "P4" + p6 = "P6" + p10 = "P10" + p15 = "P15" + p20 = "P20" + p30 = "P30" + p40 = "P40" + p50 = "P50" + p60 = "P60" + p70 = "P70" + p80 = "P80" + hot = "Hot" + cool = "Cool" + archive = "Archive" + + +class PremiumPageBlobAccessTier(str, Enum): + + p4 = "P4" + p6 = "P6" + p10 = "P10" + p15 = "P15" + p20 = "P20" + p30 = "P30" + p40 = "P40" + p50 = "P50" + p60 = "P60" + p70 = "P70" + p80 = "P80" + + +class BlobExpiryOptions(str, Enum): + + never_expire = "NeverExpire" + relative_to_creation = "RelativeToCreation" + relative_to_now = "RelativeToNow" + absolute = "Absolute" + + +class BlockListType(str, Enum): + + committed = "committed" + uncommitted = "uncommitted" + all = "all" + + +class DeleteSnapshotsOptionType(str, Enum): + + include = "include" + only = "only" + + +class EncryptionAlgorithmType(str, Enum): + + aes256 = "AES256" + + +class ListBlobsIncludeItem(str, Enum): + + copy = "copy" + deleted = "deleted" + 
metadata = "metadata" + snapshots = "snapshots" + uncommittedblobs = "uncommittedblobs" + versions = "versions" + tags = "tags" + + +class ListContainersIncludeType(str, Enum): + + metadata = "metadata" + deleted = "deleted" + + +class PathRenameMode(str, Enum): + + legacy = "legacy" + posix = "posix" + + +class SequenceNumberActionType(str, Enum): + + max = "max" + update = "update" + increment = "increment" + + +class SkuName(str, Enum): + + standard_lrs = "Standard_LRS" + standard_grs = "Standard_GRS" + standard_ragrs = "Standard_RAGRS" + standard_zrs = "Standard_ZRS" + premium_lrs = "Premium_LRS" + + +class AccountKind(str, Enum): + + storage = "Storage" + blob_storage = "BlobStorage" + storage_v2 = "StorageV2" + file_storage = "FileStorage" + block_blob_storage = "BlockBlobStorage" + + +class SyncCopyStatusType(str, Enum): + + success = "success" diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models.py new file mode 100644 index 0000000..1fdddbe --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models.py @@ -0,0 +1,2009 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from azure.core.exceptions import HttpResponseError + + +class AccessPolicy(Model): + """An Access policy. + + :param start: the date-time the policy is active + :type start: str + :param expiry: the date-time the policy expires + :type expiry: str + :param permission: the permissions for the acl policy + :type permission: str + """ + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, + 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, + 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(AccessPolicy, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.expiry = kwargs.get('expiry', None) + self.permission = kwargs.get('permission', None) + + +class AppendPositionAccessConditions(Model): + """Additional parameters for a set of operations, such as: + AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal. + + :param max_size: Optional conditional header. The max length in bytes + permitted for the append blob. If the Append Block operation would cause + the blob to exceed that limit or if the blob size is already greater than + the value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition + Failed). + :type max_size: long + :param append_position: Optional conditional header, used only for the + Append Block operation. A number indicating the byte offset to compare. + Append Block will succeed only if the append position is equal to this + number. If it is not, the request will fail with the + AppendPositionConditionNotMet error (HTTP status code 412 - Precondition + Failed). 
+ :type append_position: long + """ + + _attribute_map = { + 'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}}, + 'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(AppendPositionAccessConditions, self).__init__(**kwargs) + self.max_size = kwargs.get('max_size', None) + self.append_position = kwargs.get('append_position', None) + + +class ArrowConfiguration(Model): + """arrow configuration. + + All required parameters must be populated in order to send to Azure. + + :param schema: Required. + :type schema: list[~azure.storage.blob.models.ArrowField] + """ + + _validation = { + 'schema': {'required': True}, + } + + _attribute_map = { + 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'itemsName': 'Schema', 'wrapped': True}}, + } + _xml_map = { + 'name': 'ArrowConfiguration' + } + + def __init__(self, **kwargs): + super(ArrowConfiguration, self).__init__(**kwargs) + self.schema = kwargs.get('schema', None) + + +class ArrowField(Model): + """field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. + :type type: str + :param name: + :type name: str + :param precision: + :type precision: int + :param scale: + :type scale: int + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'precision': {'key': 'Precision', 'type': 'int', 'xml': {'name': 'Precision'}}, + 'scale': {'key': 'Scale', 'type': 'int', 'xml': {'name': 'Scale'}}, + } + _xml_map = { + 'name': 'Field' + } + + def __init__(self, **kwargs): + super(ArrowField, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.name = kwargs.get('name', None) + self.precision = kwargs.get('precision', None) + self.scale = kwargs.get('scale', None) + + +class BlobFlatListSegment(Model): + """BlobFlatListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_items: Required. + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__(self, **kwargs): + super(BlobFlatListSegment, self).__init__(**kwargs) + self.blob_items = kwargs.get('blob_items', None) + + +class BlobHierarchyListSegment(Model): + """BlobHierarchyListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_prefixes: + :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] + :param blob_items: Required. 
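Annotation: ArrowConfiguration and ArrowField, defined above, describe the output schema for quick-query results serialized as Arrow. A hedged construction sketch; the field types and names are illustrative only:

    from azure.multiapi.storagev2.blob.v2020_02_10._generated.models import (
        ArrowConfiguration, ArrowField)

    # precision/scale only make sense for decimal fields.
    schema = ArrowConfiguration(schema=[
        ArrowField(type='string', name='name'),
        ArrowField(type='decimal', name='price', precision=18, scale=2),
    ])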
+ :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__(self, **kwargs): + super(BlobHierarchyListSegment, self).__init__(**kwargs) + self.blob_prefixes = kwargs.get('blob_prefixes', None) + self.blob_items = kwargs.get('blob_items', None) + + +class BlobHTTPHeaders(Model): + """Additional parameters for a set of operations. + + :param blob_cache_control: Optional. Sets the blob's cache control. If + specified, this property is stored with the blob and returned with a read + request. + :type blob_cache_control: str + :param blob_content_type: Optional. Sets the blob's content type. If + specified, this property is stored with the blob and returned with a read + request. + :type blob_content_type: str + :param blob_content_md5: Optional. An MD5 hash of the blob content. Note + that this hash is not validated, as the hashes for the individual blocks + were validated when each was uploaded. + :type blob_content_md5: bytearray + :param blob_content_encoding: Optional. Sets the blob's content encoding. + If specified, this property is stored with the blob and returned with a + read request. + :type blob_content_encoding: str + :param blob_content_language: Optional. Set the blob's content language. + If specified, this property is stored with the blob and returned with a + read request. + :type blob_content_language: str + :param blob_content_disposition: Optional. Sets the blob's + Content-Disposition header. + :type blob_content_disposition: str + """ + + _attribute_map = { + 'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}}, + 'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}}, + 'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}}, + 'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}}, + 'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}}, + 'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(BlobHTTPHeaders, self).__init__(**kwargs) + self.blob_cache_control = kwargs.get('blob_cache_control', None) + self.blob_content_type = kwargs.get('blob_content_type', None) + self.blob_content_md5 = kwargs.get('blob_content_md5', None) + self.blob_content_encoding = kwargs.get('blob_content_encoding', None) + self.blob_content_language = kwargs.get('blob_content_language', None) + self.blob_content_disposition = kwargs.get('blob_content_disposition', None) + + +class BlobItemInternal(Model): + """An Azure Storage blob. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param deleted: Required. + :type deleted: bool + :param snapshot: Required. + :type snapshot: str + :param version_id: + :type version_id: str + :param is_current_version: + :type is_current_version: bool + :param properties: Required. 
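Annotation: BlobHTTPHeaders above groups the standard HTTP properties that are stored with a blob and replayed on reads; upload operations accept it as a single argument. A small sketch with arbitrary example values:

    from azure.multiapi.storagev2.blob.v2020_02_10._generated.models import BlobHTTPHeaders

    blob_headers = BlobHTTPHeaders(
        blob_content_type='application/json',
        blob_cache_control='max-age=3600',
        blob_content_disposition='attachment; filename="data.json"',
    )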
+ :type properties: ~azure.storage.blob.models.BlobPropertiesInternal + :param metadata: + :type metadata: ~azure.storage.blob.models.BlobMetadata + :param blob_tags: + :type blob_tags: ~azure.storage.blob.models.BlobTags + :param object_replication_metadata: + :type object_replication_metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + 'deleted': {'required': True}, + 'snapshot': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, + 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, + 'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}}, + 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}}, + 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}}, + 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, + 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}}, + 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}', 'xml': {'name': 'OrMetadata'}}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__(self, **kwargs): + super(BlobItemInternal, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.deleted = kwargs.get('deleted', None) + self.snapshot = kwargs.get('snapshot', None) + self.version_id = kwargs.get('version_id', None) + self.is_current_version = kwargs.get('is_current_version', None) + self.properties = kwargs.get('properties', None) + self.metadata = kwargs.get('metadata', None) + self.blob_tags = kwargs.get('blob_tags', None) + self.object_replication_metadata = kwargs.get('object_replication_metadata', None) + + +class BlobMetadata(Model): + """BlobMetadata. + + :param additional_properties: Unmatched properties from the message are + deserialized this collection + :type additional_properties: dict[str, str] + :param encrypted: + :type encrypted: str + """ + + _attribute_map = { + 'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}}, + 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}}, + } + _xml_map = { + 'name': 'Metadata' + } + + def __init__(self, **kwargs): + super(BlobMetadata, self).__init__(**kwargs) + self.additional_properties = kwargs.get('additional_properties', None) + self.encrypted = kwargs.get('encrypted', None) + + +class BlobPrefix(Model): + """BlobPrefix. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(BlobPrefix, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + + +class BlobPropertiesInternal(Model): + """Properties of a blob. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: + :type creation_time: datetime + :param last_modified: Required. + :type last_modified: datetime + :param etag: Required. 
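Annotation: BlobItemInternal instances surface through listing responses; the ListBlobsFlatSegmentResponse model imported earlier in this patch carries them under segment.blob_items. A reading sketch, assuming an already-deserialized response object:

    def summarize_listing(list_response):
        # Each item pairs the blob name with its BlobPropertiesInternal.
        for blob in list_response.segment.blob_items:
            props = blob.properties
            print(blob.name, props.blob_type, props.content_length)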
+ :type etag: str + :param content_length: Size in bytes + :type content_length: long + :param content_type: + :type content_type: str + :param content_encoding: + :type content_encoding: str + :param content_language: + :type content_language: str + :param content_md5: + :type content_md5: bytearray + :param content_disposition: + :type content_disposition: str + :param cache_control: + :type cache_control: str + :param blob_sequence_number: + :type blob_sequence_number: long + :param blob_type: Possible values include: 'BlockBlob', 'PageBlob', + 'AppendBlob' + :type blob_type: str or ~azure.storage.blob.models.BlobType + :param lease_status: Possible values include: 'locked', 'unlocked' + :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :param lease_state: Possible values include: 'available', 'leased', + 'expired', 'breaking', 'broken' + :type lease_state: str or ~azure.storage.blob.models.LeaseStateType + :param lease_duration: Possible values include: 'infinite', 'fixed' + :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :param copy_id: + :type copy_id: str + :param copy_status: Possible values include: 'pending', 'success', + 'aborted', 'failed' + :type copy_status: str or ~azure.storage.blob.models.CopyStatusType + :param copy_source: + :type copy_source: str + :param copy_progress: + :type copy_progress: str + :param copy_completion_time: + :type copy_completion_time: datetime + :param copy_status_description: + :type copy_status_description: str + :param server_encrypted: + :type server_encrypted: bool + :param incremental_copy: + :type incremental_copy: bool + :param destination_snapshot: + :type destination_snapshot: str + :param deleted_time: + :type deleted_time: datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15', + 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' + :type access_tier: str or ~azure.storage.blob.models.AccessTier + :param access_tier_inferred: + :type access_tier_inferred: bool + :param archive_status: Possible values include: + 'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool' + :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus + :param customer_provided_key_sha256: + :type customer_provided_key_sha256: str + :param encryption_scope: The name of the encryption scope under which the + blob is encrypted. 
+ :type encryption_scope: str + :param access_tier_change_time: + :type access_tier_change_time: datetime + :param tag_count: + :type tag_count: int + :param expires_on: + :type expires_on: datetime + :param is_sealed: + :type is_sealed: bool + :param rehydrate_priority: Possible values include: 'High', 'Standard' + :type rehydrate_priority: str or + ~azure.storage.blob.models.RehydratePriority + :param last_accessed_on: + :type last_accessed_on: datetime + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}}, + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, + 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, + 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, + 'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}}, + 'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}}, + 'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}}, + 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}}, + 'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}}, + 'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}}, + 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}}, + 'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, + 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, + 'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}}, + 'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}}, + 'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}}, + 'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}}, + 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}}, + 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}}, + 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}}, + 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}}, + 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, + 'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}}, + 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}}, + 'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}}, + 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 
'str', 'xml': {'name': 'CustomerProvidedKeySha256'}}, + 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}}, + 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, + 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}}, + 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}}, + 'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}}, + 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}}, + 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123', 'xml': {'name': 'LastAccessTime'}}, + } + _xml_map = { + 'name': 'Properties' + } + + def __init__(self, **kwargs): + super(BlobPropertiesInternal, self).__init__(**kwargs) + self.creation_time = kwargs.get('creation_time', None) + self.last_modified = kwargs.get('last_modified', None) + self.etag = kwargs.get('etag', None) + self.content_length = kwargs.get('content_length', None) + self.content_type = kwargs.get('content_type', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.content_language = kwargs.get('content_language', None) + self.content_md5 = kwargs.get('content_md5', None) + self.content_disposition = kwargs.get('content_disposition', None) + self.cache_control = kwargs.get('cache_control', None) + self.blob_sequence_number = kwargs.get('blob_sequence_number', None) + self.blob_type = kwargs.get('blob_type', None) + self.lease_status = kwargs.get('lease_status', None) + self.lease_state = kwargs.get('lease_state', None) + self.lease_duration = kwargs.get('lease_duration', None) + self.copy_id = kwargs.get('copy_id', None) + self.copy_status = kwargs.get('copy_status', None) + self.copy_source = kwargs.get('copy_source', None) + self.copy_progress = kwargs.get('copy_progress', None) + self.copy_completion_time = kwargs.get('copy_completion_time', None) + self.copy_status_description = kwargs.get('copy_status_description', None) + self.server_encrypted = kwargs.get('server_encrypted', None) + self.incremental_copy = kwargs.get('incremental_copy', None) + self.destination_snapshot = kwargs.get('destination_snapshot', None) + self.deleted_time = kwargs.get('deleted_time', None) + self.remaining_retention_days = kwargs.get('remaining_retention_days', None) + self.access_tier = kwargs.get('access_tier', None) + self.access_tier_inferred = kwargs.get('access_tier_inferred', None) + self.archive_status = kwargs.get('archive_status', None) + self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) + self.encryption_scope = kwargs.get('encryption_scope', None) + self.access_tier_change_time = kwargs.get('access_tier_change_time', None) + self.tag_count = kwargs.get('tag_count', None) + self.expires_on = kwargs.get('expires_on', None) + self.is_sealed = kwargs.get('is_sealed', None) + self.rehydrate_priority = kwargs.get('rehydrate_priority', None) + self.last_accessed_on = kwargs.get('last_accessed_on', None) + + +class BlobTag(Model): + """BlobTag. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. + :type key: str + :param value: Required. 
+ :type value: str + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}}, + 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, + } + _xml_map = { + 'name': 'Tag' + } + + def __init__(self, **kwargs): + super(BlobTag, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) + + +class BlobTags(Model): + """Blob tags. + + All required parameters must be populated in order to send to Azure. + + :param blob_tag_set: Required. + :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] + """ + + _validation = { + 'blob_tag_set': {'required': True}, + } + + _attribute_map = { + 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}}, + } + _xml_map = { + 'name': 'Tags' + } + + def __init__(self, **kwargs): + super(BlobTags, self).__init__(**kwargs) + self.blob_tag_set = kwargs.get('blob_tag_set', None) + + +class Block(Model): + """Represents a single block in a block blob. It describes the block's ID and + size. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The base64 encoded block ID. + :type name: str + :param size: Required. The block size in bytes. + :type size: int + """ + + _validation = { + 'name': {'required': True}, + 'size': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(Block, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.size = kwargs.get('size', None) + + +class BlockList(Model): + """BlockList. + + :param committed_blocks: + :type committed_blocks: list[~azure.storage.blob.models.Block] + :param uncommitted_blocks: + :type uncommitted_blocks: list[~azure.storage.blob.models.Block] + """ + + _attribute_map = { + 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, + 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(BlockList, self).__init__(**kwargs) + self.committed_blocks = kwargs.get('committed_blocks', None) + self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None) + + +class BlockLookupList(Model): + """BlockLookupList. + + :param committed: + :type committed: list[str] + :param uncommitted: + :type uncommitted: list[str] + :param latest: + :type latest: list[str] + """ + + _attribute_map = { + 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}}, + 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}}, + 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}}, + } + _xml_map = { + 'name': 'BlockList' + } + + def __init__(self, **kwargs): + super(BlockLookupList, self).__init__(**kwargs) + self.committed = kwargs.get('committed', None) + self.uncommitted = kwargs.get('uncommitted', None) + self.latest = kwargs.get('latest', None) + + +class ClearRange(Model): + """ClearRange. 
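Annotation: BlockLookupList above is the body of a Put Block List call; the latest bucket resolves each block ID to its uncommitted version when one exists, falling back to the committed one. A construction sketch; the base64 block IDs are placeholders:

    from azure.multiapi.storagev2.blob.v2020_02_10._generated.models import BlockLookupList

    # Commit the two staged blocks, preferring uncommitted versions.
    block_list = BlockLookupList(latest=['YmxvY2stMDAw', 'YmxvY2stMDAx'])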
+ + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'ClearRange' + } + + def __init__(self, **kwargs): + super(ClearRange, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) + + +class ContainerCpkScopeInfo(Model): + """Additional parameters for create operation. + + :param default_encryption_scope: Optional. Version 2019-07-07 and later. + Specifies the default encryption scope to set on the container and use for + all future writes. + :type default_encryption_scope: str + :param prevent_encryption_scope_override: Optional. Version 2019-07-07 + and newer. If true, prevents any request from specifying a different + encryption scope than the scope set on the container. + :type prevent_encryption_scope_override: bool + """ + + _attribute_map = { + 'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}}, + 'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(ContainerCpkScopeInfo, self).__init__(**kwargs) + self.default_encryption_scope = kwargs.get('default_encryption_scope', None) + self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) + + +class ContainerItem(Model): + """An Azure Storage container. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param deleted: + :type deleted: bool + :param version: + :type version: str + :param properties: Required. + :type properties: ~azure.storage.blob.models.ContainerProperties + :param metadata: + :type metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}}, + 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, + } + _xml_map = { + 'name': 'Container' + } + + def __init__(self, **kwargs): + super(ContainerItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.deleted = kwargs.get('deleted', None) + self.version = kwargs.get('version', None) + self.properties = kwargs.get('properties', None) + self.metadata = kwargs.get('metadata', None) + + +class ContainerProperties(Model): + """Properties of a container. + + All required parameters must be populated in order to send to Azure. + + :param last_modified: Required. + :type last_modified: datetime + :param etag: Required. 
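Annotation: ContainerCpkScopeInfo above pins a container to a default encryption scope at creation time, and the override flag stops individual requests from substituting a different scope. A sketch; the scope name is illustrative:

    from azure.multiapi.storagev2.blob.v2020_02_10._generated.models import ContainerCpkScopeInfo

    scope_info = ContainerCpkScopeInfo(
        default_encryption_scope='scope-prod-2020',
        prevent_encryption_scope_override=True,
    )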
+ :type etag: str + :param lease_status: Possible values include: 'locked', 'unlocked' + :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :param lease_state: Possible values include: 'available', 'leased', + 'expired', 'breaking', 'broken' + :type lease_state: str or ~azure.storage.blob.models.LeaseStateType + :param lease_duration: Possible values include: 'infinite', 'fixed' + :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :param public_access: Possible values include: 'container', 'blob' + :type public_access: str or ~azure.storage.blob.models.PublicAccessType + :param has_immutability_policy: + :type has_immutability_policy: bool + :param has_legal_hold: + :type has_legal_hold: bool + :param default_encryption_scope: + :type default_encryption_scope: str + :param prevent_encryption_scope_override: + :type prevent_encryption_scope_override: bool + :param deleted_time: + :type deleted_time: datetime + :param remaining_retention_days: + :type remaining_retention_days: int + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, + 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, + 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, + 'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}}, + 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}}, + 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}}, + 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}}, + 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(ContainerProperties, self).__init__(**kwargs) + self.last_modified = kwargs.get('last_modified', None) + self.etag = kwargs.get('etag', None) + self.lease_status = kwargs.get('lease_status', None) + self.lease_state = kwargs.get('lease_state', None) + self.lease_duration = kwargs.get('lease_duration', None) + self.public_access = kwargs.get('public_access', None) + self.has_immutability_policy = kwargs.get('has_immutability_policy', None) + self.has_legal_hold = kwargs.get('has_legal_hold', None) + self.default_encryption_scope = kwargs.get('default_encryption_scope', None) + self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) + self.deleted_time = kwargs.get('deleted_time', None) + self.remaining_retention_days = kwargs.get('remaining_retention_days', None) + + +class CorsRule(Model): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. 
Web browsers implement a
+    security restriction known as same-origin policy that prevents a web page
+    from calling APIs in a different domain; CORS provides a secure way to
+    allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param allowed_origins: Required. The origin domains that are permitted to
+     make a request against the storage service via CORS. The origin domain is
+     the domain from which the request originates. Note that the origin must be
+     an exact case-sensitive match with the origin that the user agent sends to
+     the service. You can also use the wildcard character '*' to allow all
+     origin domains to make requests via CORS.
+    :type allowed_origins: str
+    :param allowed_methods: Required. The methods (HTTP request verbs) that
+     the origin domain may use for a CORS request. (comma separated)
+    :type allowed_methods: str
+    :param allowed_headers: Required. The request headers that the origin
+     domain may specify on the CORS request.
+    :type allowed_headers: str
+    :param exposed_headers: Required. The response headers that may be sent in
+     the response to the CORS request and exposed by the browser to the request
+     issuer.
+    :type exposed_headers: str
+    :param max_age_in_seconds: Required. The maximum amount of time that a
+     browser should cache the preflight OPTIONS request.
+    :type max_age_in_seconds: int
+    """
+
+    _validation = {
+        'allowed_origins': {'required': True},
+        'allowed_methods': {'required': True},
+        'allowed_headers': {'required': True},
+        'exposed_headers': {'required': True},
+        'max_age_in_seconds': {'required': True, 'minimum': 0},
+    }
+
+    _attribute_map = {
+        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
+        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
+        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
+        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
+        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(CorsRule, self).__init__(**kwargs)
+        self.allowed_origins = kwargs.get('allowed_origins', None)
+        self.allowed_methods = kwargs.get('allowed_methods', None)
+        self.allowed_headers = kwargs.get('allowed_headers', None)
+        self.exposed_headers = kwargs.get('exposed_headers', None)
+        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
+
+
+class CpkInfo(Model):
+    """Additional parameters for a set of operations.
+
+    :param encryption_key: Optional. Specifies the encryption key to use to
+     encrypt the data provided in the request. If not specified, encryption is
+     performed with the root account encryption key. For more information, see
+     Encryption at Rest for Azure Storage Services.
+    :type encryption_key: str
+    :param encryption_key_sha256: The SHA-256 hash of the provided encryption
+     key. Must be provided if the x-ms-encryption-key header is provided.
+    :type encryption_key_sha256: str
+    :param encryption_algorithm: The algorithm used to produce the encryption
+     key hash. Currently, the only accepted value is "AES256". Must be provided
+     if the x-ms-encryption-key header is provided. Possible values include:
+     'AES256'
+    :type encryption_algorithm: str or
+     ~azure.storage.blob.models.EncryptionAlgorithmType
+    """
+
+    _attribute_map = {
+        'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}},
+        'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}},
+        'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(CpkInfo, self).__init__(**kwargs)
+        self.encryption_key = kwargs.get('encryption_key', None)
+        self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None)
+        self.encryption_algorithm = kwargs.get('encryption_algorithm', None)
+
+
+class CpkScopeInfo(Model):
+    """Additional parameters for a set of operations.
+
+    :param encryption_scope: Optional. Version 2019-07-07 and later.
+     Specifies the name of the encryption scope to use to encrypt the data
+     provided in the request. If not specified, encryption is performed with
+     the default account encryption scope. For more information, see
+     Encryption at Rest for Azure Storage Services.
+    :type encryption_scope: str
+    """
+
+    _attribute_map = {
+        'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(CpkScopeInfo, self).__init__(**kwargs)
+        self.encryption_scope = kwargs.get('encryption_scope', None)
+
+
+class DataLakeStorageError(Model):
+    """DataLakeStorageError.
+
+    :param data_lake_storage_error_details: The service error response object.
+    :type data_lake_storage_error_details:
+     ~azure.storage.blob.models.DataLakeStorageErrorError
+    """
+
+    _attribute_map = {
+        'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(DataLakeStorageError, self).__init__(**kwargs)
+        self.data_lake_storage_error_details = kwargs.get('data_lake_storage_error_details', None)
+
+
+class DataLakeStorageErrorException(HttpResponseError):
+    """Server responded with exception of type: 'DataLakeStorageError'.
+
+    :param deserialize: A deserializer
+    :param response: Server response to be deserialized.
+    """
+
+    def __init__(self, response, deserialize, *args):
+
+        model_name = 'DataLakeStorageError'
+        self.error = deserialize(model_name, response)
+        if self.error is None:
+            self.error = deserialize.dependencies[model_name]()
+        super(DataLakeStorageErrorException, self).__init__(response=response)
+
+
+class DataLakeStorageErrorError(Model):
+    """The service error response object.
+
+    :param code: The service error code.
+    :type code: str
+    :param message: The service error message.
+    :type message: str
+    """
+
+    _attribute_map = {
+        'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}},
+        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(DataLakeStorageErrorError, self).__init__(**kwargs)
+        self.code = kwargs.get('code', None)
+        self.message = kwargs.get('message', None)
+
+
+class DelimitedTextConfiguration(Model):
+    """delimited text configuration.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param column_separator: Required. column separator
+    :type column_separator: str
+    :param field_quote: Required. field quote
+    :type field_quote: str
+    :param record_separator: Required.
record separator + :type record_separator: str + :param escape_char: Required. escape char + :type escape_char: str + :param headers_present: Required. has headers + :type headers_present: bool + """ + + _validation = { + 'column_separator': {'required': True}, + 'field_quote': {'required': True}, + 'record_separator': {'required': True}, + 'escape_char': {'required': True}, + 'headers_present': {'required': True}, + } + + _attribute_map = { + 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, + 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, + 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, + } + _xml_map = { + 'name': 'DelimitedTextConfiguration' + } + + def __init__(self, **kwargs): + super(DelimitedTextConfiguration, self).__init__(**kwargs) + self.column_separator = kwargs.get('column_separator', None) + self.field_quote = kwargs.get('field_quote', None) + self.record_separator = kwargs.get('record_separator', None) + self.escape_char = kwargs.get('escape_char', None) + self.headers_present = kwargs.get('headers_present', None) + + +class DirectoryHttpHeaders(Model): + """Additional parameters for a set of operations, such as: Directory_create, + Directory_rename, Blob_rename. + + :param cache_control: Cache control for given resource + :type cache_control: str + :param content_type: Content type for given resource + :type content_type: str + :param content_encoding: Content encoding for given resource + :type content_encoding: str + :param content_language: Content language for given resource + :type content_language: str + :param content_disposition: Content disposition for given resource + :type content_disposition: str + """ + + _attribute_map = { + 'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}}, + 'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}}, + 'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}}, + 'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}}, + 'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(DirectoryHttpHeaders, self).__init__(**kwargs) + self.cache_control = kwargs.get('cache_control', None) + self.content_type = kwargs.get('content_type', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.content_language = kwargs.get('content_language', None) + self.content_disposition = kwargs.get('content_disposition', None) + + +class FilterBlobItem(Model): + """Blob info from a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param container_name: Required. + :type container_name: str + :param tag_value: Required. 
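+     The value of the blob index tag that matched the filter expression.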
+ :type tag_value: str + """ + + _validation = { + 'name': {'required': True}, + 'container_name': {'required': True}, + 'tag_value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}}, + 'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__(self, **kwargs): + super(FilterBlobItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.container_name = kwargs.get('container_name', None) + self.tag_value = kwargs.get('tag_value', None) + + +class FilterBlobSegment(Model): + """The result of a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param where: Required. + :type where: str + :param blobs: Required. + :type blobs: list[~azure.storage.blob.models.FilterBlobItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'where': {'required': True}, + 'blobs': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}}, + 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(FilterBlobSegment, self).__init__(**kwargs) + self.service_endpoint = kwargs.get('service_endpoint', None) + self.where = kwargs.get('where', None) + self.blobs = kwargs.get('blobs', None) + self.next_marker = kwargs.get('next_marker', None) + + +class GeoReplication(Model): + """Geo-Replication information for the Secondary Storage Service. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the secondary location. Possible + values include: 'live', 'bootstrap', 'unavailable' + :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType + :param last_sync_time: Required. A GMT date/time value, to the second. All + primary writes preceding this value are guaranteed to be available for + read operations at the secondary. Primary writes after this point in time + may or may not be available for reads. + :type last_sync_time: datetime + """ + + _validation = { + 'status': {'required': True}, + 'last_sync_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, + 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(GeoReplication, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.last_sync_time = kwargs.get('last_sync_time', None) + + +class JsonTextConfiguration(Model): + """json text configuration. + + All required parameters must be populated in order to send to Azure. + + :param record_separator: Required. 
record separator + :type record_separator: str + """ + + _validation = { + 'record_separator': {'required': True}, + } + + _attribute_map = { + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + } + _xml_map = { + 'name': 'JsonTextConfiguration' + } + + def __init__(self, **kwargs): + super(JsonTextConfiguration, self).__init__(**kwargs) + self.record_separator = kwargs.get('record_separator', None) + + +class KeyInfo(Model): + """Key information. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The date-time the key is active in ISO 8601 UTC + time + :type start: str + :param expiry: Required. The date-time the key expires in ISO 8601 UTC + time + :type expiry: str + """ + + _validation = { + 'start': {'required': True}, + 'expiry': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, + 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(KeyInfo, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.expiry = kwargs.get('expiry', None) + + +class LeaseAccessConditions(Model): + """Additional parameters for a set of operations. + + :param lease_id: If specified, the operation only succeeds if the + resource's lease is active and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = kwargs.get('lease_id', None) + + +class ListBlobsFlatSegmentResponse(Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. 
+ :type segment: ~azure.storage.blob.models.BlobFlatListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs.get('service_endpoint', None) + self.container_name = kwargs.get('container_name', None) + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.segment = kwargs.get('segment', None) + self.next_marker = kwargs.get('next_marker', None) + + +class ListBlobsHierarchySegmentResponse(Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param delimiter: + :type delimiter: str + :param segment: Required. 
+ :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}}, + 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs.get('service_endpoint', None) + self.container_name = kwargs.get('container_name', None) + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.delimiter = kwargs.get('delimiter', None) + self.segment = kwargs.get('segment', None) + self.next_marker = kwargs.get('next_marker', None) + + +class ListContainersSegmentResponse(Model): + """An enumeration of containers. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param container_items: Required. + :type container_items: list[~azure.storage.blob.models.ContainerItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_items': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(ListContainersSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs.get('service_endpoint', None) + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.container_items = kwargs.get('container_items', None) + self.next_marker = kwargs.get('next_marker', None) + + +class Logging(Model): + """Azure Analytics Logging settings. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param delete: Required. 
Indicates whether all delete requests should be + logged. + :type delete: bool + :param read: Required. Indicates whether all read requests should be + logged. + :type read: bool + :param write: Required. Indicates whether all write requests should be + logged. + :type write: bool + :param retention_policy: Required. + :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'delete': {'required': True}, + 'read': {'required': True}, + 'write': {'required': True}, + 'retention_policy': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, + 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, + 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(Logging, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.delete = kwargs.get('delete', None) + self.read = kwargs.get('read', None) + self.write = kwargs.get('write', None) + self.retention_policy = kwargs.get('retention_policy', None) + + +class Metrics(Model): + """a summary of request statistics grouped by API in hour or minute aggregates + for blobs. + + All required parameters must be populated in order to send to Azure. + + :param version: The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the + Blob service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary + statistics for called API operations. + :type include_apis: bool + :param retention_policy: + :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(Metrics, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.enabled = kwargs.get('enabled', None) + self.include_apis = kwargs.get('include_apis', None) + self.retention_policy = kwargs.get('retention_policy', None) + + +class ModifiedAccessConditions(Model): + """Additional parameters for a set of operations. + + :param if_modified_since: Specify this header value to operate only on a + blob if it has been modified since the specified date/time. + :type if_modified_since: datetime + :param if_unmodified_since: Specify this header value to operate only on a + blob if it has not been modified since the specified date/time. + :type if_unmodified_since: datetime + :param if_match: Specify an ETag value to operate only on blobs with a + matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs + without a matching value. + :type if_none_match: str + :param if_tags: Specify a SQL where clause on blob tags to operate only on + blobs with a matching value. 
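+     For example: "tagname" = 'tagvalue' (an illustrative placeholder tag
+     name and value).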
+ :type if_tags: str + """ + + _attribute_map = { + 'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}}, + 'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}}, + 'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}}, + 'if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'if_tags'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_tags = kwargs.get('if_tags', None) + + +class PageList(Model): + """the list of pages. + + :param page_range: + :type page_range: list[~azure.storage.blob.models.PageRange] + :param clear_range: + :type clear_range: list[~azure.storage.blob.models.ClearRange] + """ + + _attribute_map = { + 'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}}, + 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(PageList, self).__init__(**kwargs) + self.page_range = kwargs.get('page_range', None) + self.clear_range = kwargs.get('clear_range', None) + + +class PageRange(Model): + """PageRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'PageRange' + } + + def __init__(self, **kwargs): + super(PageRange, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) + + +class QueryFormat(Model): + """QueryFormat. 
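+
+    Describes the serialization format of the data in a quick query request
+    or response; the configuration object matching the chosen type should be
+    supplied.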
+
+    :param type: Possible values include: 'delimited', 'json', 'arrow'
+    :type type: str or ~azure.storage.blob.models.QueryFormatType
+    :param delimited_text_configuration:
+    :type delimited_text_configuration:
+     ~azure.storage.blob.models.DelimitedTextConfiguration
+    :param json_text_configuration:
+    :type json_text_configuration:
+     ~azure.storage.blob.models.JsonTextConfiguration
+    :param arrow_configuration:
+    :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration
+    """
+
+    _attribute_map = {
+        'type': {'key': 'Type', 'type': 'QueryFormatType', 'xml': {'name': 'Type'}},
+        'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}},
+        'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}},
+        'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration', 'xml': {'name': 'ArrowConfiguration'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(QueryFormat, self).__init__(**kwargs)
+        self.type = kwargs.get('type', None)
+        self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None)
+        self.json_text_configuration = kwargs.get('json_text_configuration', None)
+        self.arrow_configuration = kwargs.get('arrow_configuration', None)
+
+
+class QueryRequest(Model):
+    """the quick query body.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar query_type: Required. the query type. Default value: "SQL".
+    :vartype query_type: str
+    :param expression: Required. a query statement
+    :type expression: str
+    :param input_serialization:
+    :type input_serialization: ~azure.storage.blob.models.QuerySerialization
+    :param output_serialization:
+    :type output_serialization: ~azure.storage.blob.models.QuerySerialization
+    """
+
+    _validation = {
+        'query_type': {'required': True, 'constant': True},
+        'expression': {'required': True},
+    }
+
+    _attribute_map = {
+        'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}},
+        'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}},
+        'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'InputSerialization'}},
+        'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'OutputSerialization'}},
+    }
+    _xml_map = {
+        'name': 'QueryRequest'
+    }
+
+    query_type = "SQL"
+
+    def __init__(self, **kwargs):
+        super(QueryRequest, self).__init__(**kwargs)
+        self.expression = kwargs.get('expression', None)
+        self.input_serialization = kwargs.get('input_serialization', None)
+        self.output_serialization = kwargs.get('output_serialization', None)
+
+
+class QuerySerialization(Model):
+    """QuerySerialization.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param format: Required.
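+     The query format, describing how the input or output data stream is
+     serialized (delimited text, JSON, or Arrow).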
+    :type format: ~azure.storage.blob.models.QueryFormat
+    """
+
+    _validation = {
+        'format': {'required': True},
+    }
+
+    _attribute_map = {
+        'format': {'key': 'Format', 'type': 'QueryFormat', 'xml': {'name': 'Format'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(QuerySerialization, self).__init__(**kwargs)
+        self.format = kwargs.get('format', None)
+
+
+class RetentionPolicy(Model):
+    """the retention policy which determines how long the associated data should
+    persist.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param enabled: Required. Indicates whether a retention policy is enabled
+     for the storage service
+    :type enabled: bool
+    :param days: Indicates the number of days that metrics or logging or
+     soft-deleted data should be retained. All data older than this value will
+     be deleted
+    :type days: int
+    """
+
+    _validation = {
+        'enabled': {'required': True},
+        'days': {'minimum': 1},
+    }
+
+    _attribute_map = {
+        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+        'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(RetentionPolicy, self).__init__(**kwargs)
+        self.enabled = kwargs.get('enabled', None)
+        self.days = kwargs.get('days', None)
+
+
+class SequenceNumberAccessConditions(Model):
+    """Additional parameters for a set of operations, such as:
+    PageBlob_upload_pages, PageBlob_clear_pages,
+    PageBlob_upload_pages_from_url.
+
+    :param if_sequence_number_less_than_or_equal_to: Specify this header value
+     to operate only on a blob if it has a sequence number less than or equal
+     to the specified value.
+    :type if_sequence_number_less_than_or_equal_to: long
+    :param if_sequence_number_less_than: Specify this header value to operate
+     only on a blob if it has a sequence number less than the specified value.
+    :type if_sequence_number_less_than: long
+    :param if_sequence_number_equal_to: Specify this header value to operate
+     only on a blob if it has the specified sequence number.
+    :type if_sequence_number_equal_to: long
+    """
+
+    _attribute_map = {
+        'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}},
+        'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}},
+        'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(SequenceNumberAccessConditions, self).__init__(**kwargs)
+        self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None)
+        self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None)
+        self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None)
+
+
+class SignedIdentifier(Model):
+    """signed identifier.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required.
a unique id + :type id: str + :param access_policy: + :type access_policy: ~azure.storage.blob.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, + } + _xml_map = { + 'name': 'SignedIdentifier' + } + + def __init__(self, **kwargs): + super(SignedIdentifier, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.access_policy = kwargs.get('access_policy', None) + + +class SourceModifiedAccessConditions(Model): + """Additional parameters for a set of operations. + + :param source_if_modified_since: Specify this header value to operate only + on a blob if it has been modified since the specified date/time. + :type source_if_modified_since: datetime + :param source_if_unmodified_since: Specify this header value to operate + only on a blob if it has not been modified since the specified date/time. + :type source_if_unmodified_since: datetime + :param source_if_match: Specify an ETag value to operate only on blobs + with a matching value. + :type source_if_match: str + :param source_if_none_match: Specify an ETag value to operate only on + blobs without a matching value. + :type source_if_none_match: str + :param source_if_tags: Specify a SQL where clause on blob tags to operate + only on blobs with a matching value. + :type source_if_tags: str + """ + + _attribute_map = { + 'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}}, + 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}}, + 'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}}, + 'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}}, + 'source_if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_tags'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_modified_since = kwargs.get('source_if_modified_since', None) + self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) + self.source_if_match = kwargs.get('source_if_match', None) + self.source_if_none_match = kwargs.get('source_if_none_match', None) + self.source_if_tags = kwargs.get('source_if_tags', None) + + +class StaticWebsite(Model): + """The properties that enable an account to host a static website. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. 
Indicates whether this account is hosting a
+     static website
+    :type enabled: bool
+    :param index_document: The default name of the index page under each
+     directory
+    :type index_document: str
+    :param error_document404_path: The absolute path of the custom 404 page
+    :type error_document404_path: str
+    :param default_index_document_path: Absolute path of the default index
+     page
+    :type default_index_document_path: str
+    """
+
+    _validation = {
+        'enabled': {'required': True},
+    }
+
+    _attribute_map = {
+        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+        'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}},
+        'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}},
+        'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(StaticWebsite, self).__init__(**kwargs)
+        self.enabled = kwargs.get('enabled', None)
+        self.index_document = kwargs.get('index_document', None)
+        self.error_document404_path = kwargs.get('error_document404_path', None)
+        self.default_index_document_path = kwargs.get('default_index_document_path', None)
+
+
+class StorageError(Model):
+    """StorageError.
+
+    :param message:
+    :type message: str
+    """
+
+    _attribute_map = {
+        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, **kwargs):
+        super(StorageError, self).__init__(**kwargs)
+        self.message = kwargs.get('message', None)
+
+
+class StorageErrorException(HttpResponseError):
+    """Server responded with exception of type: 'StorageError'.
+
+    :param deserialize: A deserializer
+    :param response: Server response to be deserialized.
+    """
+
+    def __init__(self, response, deserialize, *args):
+
+        model_name = 'StorageError'
+        self.error = deserialize(model_name, response)
+        if self.error is None:
+            self.error = deserialize.dependencies[model_name]()
+        super(StorageErrorException, self).__init__(response=response)
+
+
+class StorageServiceProperties(Model):
+    """Storage Service Properties.
+
+    :param logging:
+    :type logging: ~azure.storage.blob.models.Logging
+    :param hour_metrics:
+    :type hour_metrics: ~azure.storage.blob.models.Metrics
+    :param minute_metrics:
+    :type minute_metrics: ~azure.storage.blob.models.Metrics
+    :param cors: The set of CORS rules.
+    :type cors: list[~azure.storage.blob.models.CorsRule]
+    :param default_service_version: The default version to use for requests to
+     the Blob service if an incoming request's version is not specified.
+ Possible values include version 2008-10-27 and all more recent versions + :type default_service_version: str + :param delete_retention_policy: + :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy + :param static_website: + :type static_website: ~azure.storage.blob.models.StaticWebsite + """ + + _attribute_map = { + 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, + 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}}, + 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}}, + 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(StorageServiceProperties, self).__init__(**kwargs) + self.logging = kwargs.get('logging', None) + self.hour_metrics = kwargs.get('hour_metrics', None) + self.minute_metrics = kwargs.get('minute_metrics', None) + self.cors = kwargs.get('cors', None) + self.default_service_version = kwargs.get('default_service_version', None) + self.delete_retention_policy = kwargs.get('delete_retention_policy', None) + self.static_website = kwargs.get('static_website', None) + + +class StorageServiceStats(Model): + """Stats for the storage service. + + :param geo_replication: + :type geo_replication: ~azure.storage.blob.models.GeoReplication + """ + + _attribute_map = { + 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(StorageServiceStats, self).__init__(**kwargs) + self.geo_replication = kwargs.get('geo_replication', None) + + +class UserDelegationKey(Model): + """A user delegation key. + + All required parameters must be populated in order to send to Azure. + + :param signed_oid: Required. The Azure Active Directory object ID in GUID + format. + :type signed_oid: str + :param signed_tid: Required. The Azure Active Directory tenant ID in GUID + format + :type signed_tid: str + :param signed_start: Required. The date-time the key is active + :type signed_start: datetime + :param signed_expiry: Required. The date-time the key expires + :type signed_expiry: datetime + :param signed_service: Required. Abbreviation of the Azure Storage service + that accepts the key + :type signed_service: str + :param signed_version: Required. The service version that created the key + :type signed_version: str + :param value: Required. 
The key as a base64 string + :type value: str + """ + + _validation = { + 'signed_oid': {'required': True}, + 'signed_tid': {'required': True}, + 'signed_start': {'required': True}, + 'signed_expiry': {'required': True}, + 'signed_service': {'required': True}, + 'signed_version': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}}, + 'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}}, + 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}}, + 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}}, + 'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}}, + 'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}}, + 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(UserDelegationKey, self).__init__(**kwargs) + self.signed_oid = kwargs.get('signed_oid', None) + self.signed_tid = kwargs.get('signed_tid', None) + self.signed_start = kwargs.get('signed_start', None) + self.signed_expiry = kwargs.get('signed_expiry', None) + self.signed_service = kwargs.get('signed_service', None) + self.signed_version = kwargs.get('signed_version', None) + self.value = kwargs.get('value', None) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models_py3.py new file mode 100644 index 0000000..7e5a3fc --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models_py3.py @@ -0,0 +1,2009 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from azure.core.exceptions import HttpResponseError + + +class AccessPolicy(Model): + """An Access policy. + + :param start: the date-time the policy is active + :type start: str + :param expiry: the date-time the policy expires + :type expiry: str + :param permission: the permissions for the acl policy + :type permission: str + """ + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, + 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, + 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, + } + _xml_map = { + } + + def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None: + super(AccessPolicy, self).__init__(**kwargs) + self.start = start + self.expiry = expiry + self.permission = permission + + +class AppendPositionAccessConditions(Model): + """Additional parameters for a set of operations, such as: + AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal. + + :param max_size: Optional conditional header. The max length in bytes + permitted for the append blob. 
If the Append Block operation would cause + the blob to exceed that limit or if the blob size is already greater than + the value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition + Failed). + :type max_size: long + :param append_position: Optional conditional header, used only for the + Append Block operation. A number indicating the byte offset to compare. + Append Block will succeed only if the append position is equal to this + number. If it is not, the request will fail with the + AppendPositionConditionNotMet error (HTTP status code 412 - Precondition + Failed). + :type append_position: long + """ + + _attribute_map = { + 'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}}, + 'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}}, + } + _xml_map = { + } + + def __init__(self, *, max_size: int=None, append_position: int=None, **kwargs) -> None: + super(AppendPositionAccessConditions, self).__init__(**kwargs) + self.max_size = max_size + self.append_position = append_position + + +class ArrowConfiguration(Model): + """arrow configuration. + + All required parameters must be populated in order to send to Azure. + + :param schema: Required. + :type schema: list[~azure.storage.blob.models.ArrowField] + """ + + _validation = { + 'schema': {'required': True}, + } + + _attribute_map = { + 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'itemsName': 'Schema', 'wrapped': True}}, + } + _xml_map = { + 'name': 'ArrowConfiguration' + } + + def __init__(self, *, schema, **kwargs) -> None: + super(ArrowConfiguration, self).__init__(**kwargs) + self.schema = schema + + +class ArrowField(Model): + """field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. + :type type: str + :param name: + :type name: str + :param precision: + :type precision: int + :param scale: + :type scale: int + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'precision': {'key': 'Precision', 'type': 'int', 'xml': {'name': 'Precision'}}, + 'scale': {'key': 'Scale', 'type': 'int', 'xml': {'name': 'Scale'}}, + } + _xml_map = { + 'name': 'Field' + } + + def __init__(self, *, type: str, name: str=None, precision: int=None, scale: int=None, **kwargs) -> None: + super(ArrowField, self).__init__(**kwargs) + self.type = type + self.name = name + self.precision = precision + self.scale = scale + + +class BlobFlatListSegment(Model): + """BlobFlatListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_items: Required. + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__(self, *, blob_items, **kwargs) -> None: + super(BlobFlatListSegment, self).__init__(**kwargs) + self.blob_items = blob_items + + +class BlobHierarchyListSegment(Model): + """BlobHierarchyListSegment. + + All required parameters must be populated in order to send to Azure. 
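+
+    Example (an illustrative sketch only; the field values shown are
+    placeholders, not service data)::
+
+        segment = BlobHierarchyListSegment(
+            blob_items=[],       # required: list of BlobItemInternal
+            blob_prefixes=None,  # optional: list of BlobPrefix
+        )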
+ + :param blob_prefixes: + :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] + :param blob_items: Required. + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__(self, *, blob_items, blob_prefixes=None, **kwargs) -> None: + super(BlobHierarchyListSegment, self).__init__(**kwargs) + self.blob_prefixes = blob_prefixes + self.blob_items = blob_items + + +class BlobHTTPHeaders(Model): + """Additional parameters for a set of operations. + + :param blob_cache_control: Optional. Sets the blob's cache control. If + specified, this property is stored with the blob and returned with a read + request. + :type blob_cache_control: str + :param blob_content_type: Optional. Sets the blob's content type. If + specified, this property is stored with the blob and returned with a read + request. + :type blob_content_type: str + :param blob_content_md5: Optional. An MD5 hash of the blob content. Note + that this hash is not validated, as the hashes for the individual blocks + were validated when each was uploaded. + :type blob_content_md5: bytearray + :param blob_content_encoding: Optional. Sets the blob's content encoding. + If specified, this property is stored with the blob and returned with a + read request. + :type blob_content_encoding: str + :param blob_content_language: Optional. Set the blob's content language. + If specified, this property is stored with the blob and returned with a + read request. + :type blob_content_language: str + :param blob_content_disposition: Optional. Sets the blob's + Content-Disposition header. + :type blob_content_disposition: str + """ + + _attribute_map = { + 'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}}, + 'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}}, + 'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}}, + 'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}}, + 'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}}, + 'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}}, + } + _xml_map = { + } + + def __init__(self, *, blob_cache_control: str=None, blob_content_type: str=None, blob_content_md5: bytearray=None, blob_content_encoding: str=None, blob_content_language: str=None, blob_content_disposition: str=None, **kwargs) -> None: + super(BlobHTTPHeaders, self).__init__(**kwargs) + self.blob_cache_control = blob_cache_control + self.blob_content_type = blob_content_type + self.blob_content_md5 = blob_content_md5 + self.blob_content_encoding = blob_content_encoding + self.blob_content_language = blob_content_language + self.blob_content_disposition = blob_content_disposition + + +class BlobItemInternal(Model): + """An Azure Storage blob. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param deleted: Required. + :type deleted: bool + :param snapshot: Required. 
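+     The date-time value identifying the snapshot, when the item represents
+     a blob snapshot.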
+    :type snapshot: str
+    :param version_id:
+    :type version_id: str
+    :param is_current_version:
+    :type is_current_version: bool
+    :param properties: Required.
+    :type properties: ~azure.storage.blob.models.BlobPropertiesInternal
+    :param metadata:
+    :type metadata: ~azure.storage.blob.models.BlobMetadata
+    :param blob_tags:
+    :type blob_tags: ~azure.storage.blob.models.BlobTags
+    :param object_replication_metadata:
+    :type object_replication_metadata: dict[str, str]
+    """
+
+    _validation = {
+        'name': {'required': True},
+        'deleted': {'required': True},
+        'snapshot': {'required': True},
+        'properties': {'required': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+        'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
+        'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}},
+        'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}},
+        'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}},
+        'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}},
+        'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}},
+        'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}},
+        'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}', 'xml': {'name': 'OrMetadata'}},
+    }
+    _xml_map = {
+        'name': 'Blob'
+    }
+
+    def __init__(self, *, name: str, deleted: bool, snapshot: str, properties, version_id: str=None, is_current_version: bool=None, metadata=None, blob_tags=None, object_replication_metadata=None, **kwargs) -> None:
+        super(BlobItemInternal, self).__init__(**kwargs)
+        self.name = name
+        self.deleted = deleted
+        self.snapshot = snapshot
+        self.version_id = version_id
+        self.is_current_version = is_current_version
+        self.properties = properties
+        self.metadata = metadata
+        self.blob_tags = blob_tags
+        self.object_replication_metadata = object_replication_metadata
+
+
+class BlobMetadata(Model):
+    """BlobMetadata.
+
+    :param additional_properties: Unmatched properties from the message are
+     deserialized into this collection
+    :type additional_properties: dict[str, str]
+    :param encrypted:
+    :type encrypted: str
+    """
+
+    _attribute_map = {
+        'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}},
+        'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}},
+    }
+    _xml_map = {
+        'name': 'Metadata'
+    }
+
+    def __init__(self, *, additional_properties=None, encrypted: str=None, **kwargs) -> None:
+        super(BlobMetadata, self).__init__(**kwargs)
+        self.additional_properties = additional_properties
+        self.encrypted = encrypted
+
+
+class BlobPrefix(Model):
+    """BlobPrefix.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required.
+    :type name: str
+    """
+
+    _validation = {
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, name: str, **kwargs) -> None:
+        super(BlobPrefix, self).__init__(**kwargs)
+        self.name = name
+
+
+class BlobPropertiesInternal(Model):
+    """Properties of a blob.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param creation_time:
+    :type creation_time: datetime
+    :param last_modified: Required.
+    :type last_modified: datetime
+    :param etag: Required.
+ :type etag: str + :param content_length: Size in bytes + :type content_length: long + :param content_type: + :type content_type: str + :param content_encoding: + :type content_encoding: str + :param content_language: + :type content_language: str + :param content_md5: + :type content_md5: bytearray + :param content_disposition: + :type content_disposition: str + :param cache_control: + :type cache_control: str + :param blob_sequence_number: + :type blob_sequence_number: long + :param blob_type: Possible values include: 'BlockBlob', 'PageBlob', + 'AppendBlob' + :type blob_type: str or ~azure.storage.blob.models.BlobType + :param lease_status: Possible values include: 'locked', 'unlocked' + :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :param lease_state: Possible values include: 'available', 'leased', + 'expired', 'breaking', 'broken' + :type lease_state: str or ~azure.storage.blob.models.LeaseStateType + :param lease_duration: Possible values include: 'infinite', 'fixed' + :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :param copy_id: + :type copy_id: str + :param copy_status: Possible values include: 'pending', 'success', + 'aborted', 'failed' + :type copy_status: str or ~azure.storage.blob.models.CopyStatusType + :param copy_source: + :type copy_source: str + :param copy_progress: + :type copy_progress: str + :param copy_completion_time: + :type copy_completion_time: datetime + :param copy_status_description: + :type copy_status_description: str + :param server_encrypted: + :type server_encrypted: bool + :param incremental_copy: + :type incremental_copy: bool + :param destination_snapshot: + :type destination_snapshot: str + :param deleted_time: + :type deleted_time: datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15', + 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' + :type access_tier: str or ~azure.storage.blob.models.AccessTier + :param access_tier_inferred: + :type access_tier_inferred: bool + :param archive_status: Possible values include: + 'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool' + :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus + :param customer_provided_key_sha256: + :type customer_provided_key_sha256: str + :param encryption_scope: The name of the encryption scope under which the + blob is encrypted. 
+ :type encryption_scope: str + :param access_tier_change_time: + :type access_tier_change_time: datetime + :param tag_count: + :type tag_count: int + :param expires_on: + :type expires_on: datetime + :param is_sealed: + :type is_sealed: bool + :param rehydrate_priority: Possible values include: 'High', 'Standard' + :type rehydrate_priority: str or + ~azure.storage.blob.models.RehydratePriority + :param last_accessed_on: + :type last_accessed_on: datetime + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}}, + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, + 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, + 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, + 'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}}, + 'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}}, + 'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}}, + 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}}, + 'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}}, + 'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}}, + 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}}, + 'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, + 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, + 'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}}, + 'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}}, + 'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}}, + 'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}}, + 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}}, + 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}}, + 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}}, + 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}}, + 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, + 'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}}, + 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}}, + 'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}}, + 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 
'str', 'xml': {'name': 'CustomerProvidedKeySha256'}}, + 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}}, + 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, + 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}}, + 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}}, + 'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}}, + 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}}, + 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123', 'xml': {'name': 'LastAccessTime'}}, + } + _xml_map = { + 'name': 'Properties' + } + + def __init__(self, *, last_modified, etag: str, creation_time=None, content_length: int=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_md5: bytearray=None, content_disposition: str=None, cache_control: str=None, blob_sequence_number: int=None, blob_type=None, lease_status=None, lease_state=None, lease_duration=None, copy_id: str=None, copy_status=None, copy_source: str=None, copy_progress: str=None, copy_completion_time=None, copy_status_description: str=None, server_encrypted: bool=None, incremental_copy: bool=None, destination_snapshot: str=None, deleted_time=None, remaining_retention_days: int=None, access_tier=None, access_tier_inferred: bool=None, archive_status=None, customer_provided_key_sha256: str=None, encryption_scope: str=None, access_tier_change_time=None, tag_count: int=None, expires_on=None, is_sealed: bool=None, rehydrate_priority=None, last_accessed_on=None, **kwargs) -> None: + super(BlobPropertiesInternal, self).__init__(**kwargs) + self.creation_time = creation_time + self.last_modified = last_modified + self.etag = etag + self.content_length = content_length + self.content_type = content_type + self.content_encoding = content_encoding + self.content_language = content_language + self.content_md5 = content_md5 + self.content_disposition = content_disposition + self.cache_control = cache_control + self.blob_sequence_number = blob_sequence_number + self.blob_type = blob_type + self.lease_status = lease_status + self.lease_state = lease_state + self.lease_duration = lease_duration + self.copy_id = copy_id + self.copy_status = copy_status + self.copy_source = copy_source + self.copy_progress = copy_progress + self.copy_completion_time = copy_completion_time + self.copy_status_description = copy_status_description + self.server_encrypted = server_encrypted + self.incremental_copy = incremental_copy + self.destination_snapshot = destination_snapshot + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days + self.access_tier = access_tier + self.access_tier_inferred = access_tier_inferred + self.archive_status = archive_status + self.customer_provided_key_sha256 = customer_provided_key_sha256 + self.encryption_scope = encryption_scope + self.access_tier_change_time = access_tier_change_time + self.tag_count = tag_count + self.expires_on = expires_on + self.is_sealed = is_sealed + self.rehydrate_priority = rehydrate_priority + self.last_accessed_on = last_accessed_on + + +class BlobTag(Model): + """BlobTag. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. + :type key: str + :param value: Required. 
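+
+# A minimal sketch (not produced by AutoRest) of reading the
+# BlobPropertiesInternal model defined above, as returned in a listing.
+# Only last_modified and etag are guaranteed to be populated; the other
+# fields may be None.
+def _example_describe_blob_properties(props):
+    # 'props' is assumed to be a BlobPropertiesInternal instance.
+    return '{0} bytes, type={1}, modified={2}'.format(
+        props.content_length, props.blob_type, props.last_modified)
+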
+ :type value: str + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}}, + 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, + } + _xml_map = { + 'name': 'Tag' + } + + def __init__(self, *, key: str, value: str, **kwargs) -> None: + super(BlobTag, self).__init__(**kwargs) + self.key = key + self.value = value + + +class BlobTags(Model): + """Blob tags. + + All required parameters must be populated in order to send to Azure. + + :param blob_tag_set: Required. + :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] + """ + + _validation = { + 'blob_tag_set': {'required': True}, + } + + _attribute_map = { + 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}}, + } + _xml_map = { + 'name': 'Tags' + } + + def __init__(self, *, blob_tag_set, **kwargs) -> None: + super(BlobTags, self).__init__(**kwargs) + self.blob_tag_set = blob_tag_set + + +class Block(Model): + """Represents a single block in a block blob. It describes the block's ID and + size. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The base64 encoded block ID. + :type name: str + :param size: Required. The block size in bytes. + :type size: int + """ + + _validation = { + 'name': {'required': True}, + 'size': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}}, + } + _xml_map = { + } + + def __init__(self, *, name: str, size: int, **kwargs) -> None: + super(Block, self).__init__(**kwargs) + self.name = name + self.size = size + + +class BlockList(Model): + """BlockList. + + :param committed_blocks: + :type committed_blocks: list[~azure.storage.blob.models.Block] + :param uncommitted_blocks: + :type uncommitted_blocks: list[~azure.storage.blob.models.Block] + """ + + _attribute_map = { + 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, + 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, + } + _xml_map = { + } + + def __init__(self, *, committed_blocks=None, uncommitted_blocks=None, **kwargs) -> None: + super(BlockList, self).__init__(**kwargs) + self.committed_blocks = committed_blocks + self.uncommitted_blocks = uncommitted_blocks + + +class BlockLookupList(Model): + """BlockLookupList. + + :param committed: + :type committed: list[str] + :param uncommitted: + :type uncommitted: list[str] + :param latest: + :type latest: list[str] + """ + + _attribute_map = { + 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}}, + 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}}, + 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}}, + } + _xml_map = { + 'name': 'BlockList' + } + + def __init__(self, *, committed=None, uncommitted=None, latest=None, **kwargs) -> None: + super(BlockLookupList, self).__init__(**kwargs) + self.committed = committed + self.uncommitted = uncommitted + self.latest = latest + + +class ClearRange(Model): + """ClearRange. 
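+
+# A minimal sketch of populating the BlockLookupList model above for a
+# commit-block-list request body. The block IDs are hypothetical and are
+# base64-encoded, matching the Block.name format documented above.
+def _example_block_lookup_list():
+    import base64
+    # Commit the most recently uploaded version of each (hypothetical) block.
+    block_ids = [
+        base64.b64encode('block-{0:03d}'.format(i).encode('ascii')).decode('ascii')
+        for i in range(3)
+    ]
+    return BlockLookupList(latest=block_ids)
+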
+ + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'ClearRange' + } + + def __init__(self, *, start: int, end: int, **kwargs) -> None: + super(ClearRange, self).__init__(**kwargs) + self.start = start + self.end = end + + +class ContainerCpkScopeInfo(Model): + """Additional parameters for create operation. + + :param default_encryption_scope: Optional. Version 2019-07-07 and later. + Specifies the default encryption scope to set on the container and use for + all future writes. + :type default_encryption_scope: str + :param prevent_encryption_scope_override: Optional. Version 2019-07-07 + and newer. If true, prevents any request from specifying a different + encryption scope than the scope set on the container. + :type prevent_encryption_scope_override: bool + """ + + _attribute_map = { + 'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}}, + 'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}}, + } + _xml_map = { + } + + def __init__(self, *, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, **kwargs) -> None: + super(ContainerCpkScopeInfo, self).__init__(**kwargs) + self.default_encryption_scope = default_encryption_scope + self.prevent_encryption_scope_override = prevent_encryption_scope_override + + +class ContainerItem(Model): + """An Azure Storage container. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param deleted: + :type deleted: bool + :param version: + :type version: str + :param properties: Required. + :type properties: ~azure.storage.blob.models.ContainerProperties + :param metadata: + :type metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}}, + 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, + } + _xml_map = { + 'name': 'Container' + } + + def __init__(self, *, name: str, properties, deleted: bool=None, version: str=None, metadata=None, **kwargs) -> None: + super(ContainerItem, self).__init__(**kwargs) + self.name = name + self.deleted = deleted + self.version = version + self.properties = properties + self.metadata = metadata + + +class ContainerProperties(Model): + """Properties of a container. + + All required parameters must be populated in order to send to Azure. + + :param last_modified: Required. + :type last_modified: datetime + :param etag: Required. 
+ :type etag: str + :param lease_status: Possible values include: 'locked', 'unlocked' + :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :param lease_state: Possible values include: 'available', 'leased', + 'expired', 'breaking', 'broken' + :type lease_state: str or ~azure.storage.blob.models.LeaseStateType + :param lease_duration: Possible values include: 'infinite', 'fixed' + :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :param public_access: Possible values include: 'container', 'blob' + :type public_access: str or ~azure.storage.blob.models.PublicAccessType + :param has_immutability_policy: + :type has_immutability_policy: bool + :param has_legal_hold: + :type has_legal_hold: bool + :param default_encryption_scope: + :type default_encryption_scope: str + :param prevent_encryption_scope_override: + :type prevent_encryption_scope_override: bool + :param deleted_time: + :type deleted_time: datetime + :param remaining_retention_days: + :type remaining_retention_days: int + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, + 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, + 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, + 'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}}, + 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}}, + 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}}, + 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}}, + 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, + } + _xml_map = { + } + + def __init__(self, *, last_modified, etag: str, lease_status=None, lease_state=None, lease_duration=None, public_access=None, has_immutability_policy: bool=None, has_legal_hold: bool=None, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, deleted_time=None, remaining_retention_days: int=None, **kwargs) -> None: + super(ContainerProperties, self).__init__(**kwargs) + self.last_modified = last_modified + self.etag = etag + self.lease_status = lease_status + self.lease_state = lease_state + self.lease_duration = lease_duration + self.public_access = public_access + self.has_immutability_policy = has_immutability_policy + self.has_legal_hold = has_legal_hold + self.default_encryption_scope = default_encryption_scope + self.prevent_encryption_scope_override = prevent_encryption_scope_override + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days + + +class CorsRule(Model): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. 
Web browsers implement a
+    security restriction known as same-origin policy that prevents a web page
+    from calling APIs in a different domain; CORS provides a secure way to
+    allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param allowed_origins: Required. The origin domains that are permitted to
+     make a request against the storage service via CORS. The origin domain is
+     the domain from which the request originates. Note that the origin must be
+     an exact case-sensitive match with the origin that the user agent sends to
+     the service. You can also use the wildcard character '*' to allow all
+     origin domains to make requests via CORS.
+    :type allowed_origins: str
+    :param allowed_methods: Required. The methods (HTTP request verbs) that
+     the origin domain may use for a CORS request. (comma separated)
+    :type allowed_methods: str
+    :param allowed_headers: Required. The request headers that the origin
+     domain may specify on the CORS request.
+    :type allowed_headers: str
+    :param exposed_headers: Required. The response headers that may be sent in
+     the response to the CORS request and exposed by the browser to the request
+     issuer.
+    :type exposed_headers: str
+    :param max_age_in_seconds: Required. The maximum amount of time that a
+     browser should cache the preflight OPTIONS request.
+    :type max_age_in_seconds: int
+    """
+
+    _validation = {
+        'allowed_origins': {'required': True},
+        'allowed_methods': {'required': True},
+        'allowed_headers': {'required': True},
+        'exposed_headers': {'required': True},
+        'max_age_in_seconds': {'required': True, 'minimum': 0},
+    }
+
+    _attribute_map = {
+        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
+        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
+        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
+        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
+        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None:
+        super(CorsRule, self).__init__(**kwargs)
+        self.allowed_origins = allowed_origins
+        self.allowed_methods = allowed_methods
+        self.allowed_headers = allowed_headers
+        self.exposed_headers = exposed_headers
+        self.max_age_in_seconds = max_age_in_seconds
+
+
+class CpkInfo(Model):
+    """Additional parameters for a set of operations.
+
+    :param encryption_key: Optional. Specifies the encryption key to use to
+     encrypt the data provided in the request. If not specified, encryption is
+     performed with the root account encryption key. For more information, see
+     Encryption at Rest for Azure Storage Services.
+    :type encryption_key: str
+    :param encryption_key_sha256: The SHA-256 hash of the provided encryption
+     key. Must be provided if the x-ms-encryption-key header is provided.
+    :type encryption_key_sha256: str
+    :param encryption_algorithm: The algorithm used to produce the encryption
+     key hash. Currently, the only accepted value is "AES256". Must be provided
+     if the x-ms-encryption-key header is provided.
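+
+# A minimal sketch using the CorsRule model above: a permissive rule that
+# lets any origin issue GET/HEAD requests and caches the preflight response
+# for one hour. The values are illustrative, not recommended defaults.
+def _example_cors_rule():
+    return CorsRule(
+        allowed_origins='*',
+        allowed_methods='GET,HEAD',
+        allowed_headers='',
+        exposed_headers='',
+        max_age_in_seconds=3600)
+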
+     Possible values include: 'AES256'
+    :type encryption_algorithm: str or
+     ~azure.storage.blob.models.EncryptionAlgorithmType
+    """
+
+    _attribute_map = {
+        'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}},
+        'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}},
+        'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, encryption_key: str=None, encryption_key_sha256: str=None, encryption_algorithm=None, **kwargs) -> None:
+        super(CpkInfo, self).__init__(**kwargs)
+        self.encryption_key = encryption_key
+        self.encryption_key_sha256 = encryption_key_sha256
+        self.encryption_algorithm = encryption_algorithm
+
+
+class CpkScopeInfo(Model):
+    """Additional parameters for a set of operations.
+
+    :param encryption_scope: Optional. Version 2019-07-07 and later.
+     Specifies the name of the encryption scope to use to encrypt the data
+     provided in the request. If not specified, encryption is performed with
+     the default account encryption scope. For more information, see
+     Encryption at Rest for Azure Storage Services.
+    :type encryption_scope: str
+    """
+
+    _attribute_map = {
+        'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, encryption_scope: str=None, **kwargs) -> None:
+        super(CpkScopeInfo, self).__init__(**kwargs)
+        self.encryption_scope = encryption_scope
+
+
+class DataLakeStorageError(Model):
+    """DataLakeStorageError.
+
+    :param data_lake_storage_error_details: The service error response object.
+    :type data_lake_storage_error_details:
+     ~azure.storage.blob.models.DataLakeStorageErrorError
+    """
+
+    _attribute_map = {
+        'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, data_lake_storage_error_details=None, **kwargs) -> None:
+        super(DataLakeStorageError, self).__init__(**kwargs)
+        self.data_lake_storage_error_details = data_lake_storage_error_details
+
+
+class DataLakeStorageErrorException(HttpResponseError):
+    """Server responded with an exception of type 'DataLakeStorageError'.
+
+    :param deserialize: A deserializer
+    :param response: Server response to be deserialized.
+    """
+
+    def __init__(self, response, deserialize, *args):
+
+        model_name = 'DataLakeStorageError'
+        self.error = deserialize(model_name, response)
+        if self.error is None:
+            self.error = deserialize.dependencies[model_name]()
+        super(DataLakeStorageErrorException, self).__init__(response=response)
+
+
+class DataLakeStorageErrorError(Model):
+    """The service error response object.
+
+    :param code: The service error code.
+    :type code: str
+    :param message: The service error message.
+    :type message: str
+    """
+
+    _attribute_map = {
+        'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}},
+        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None:
+        super(DataLakeStorageErrorError, self).__init__(**kwargs)
+        self.code = code
+        self.message = message
+
+
+class DelimitedTextConfiguration(Model):
+    """delimited text configuration.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param column_separator: Required. column separator
+    :type column_separator: str
+    :param field_quote: Required.
field quote + :type field_quote: str + :param record_separator: Required. record separator + :type record_separator: str + :param escape_char: Required. escape char + :type escape_char: str + :param headers_present: Required. has headers + :type headers_present: bool + """ + + _validation = { + 'column_separator': {'required': True}, + 'field_quote': {'required': True}, + 'record_separator': {'required': True}, + 'escape_char': {'required': True}, + 'headers_present': {'required': True}, + } + + _attribute_map = { + 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, + 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, + 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, + } + _xml_map = { + 'name': 'DelimitedTextConfiguration' + } + + def __init__(self, *, column_separator: str, field_quote: str, record_separator: str, escape_char: str, headers_present: bool, **kwargs) -> None: + super(DelimitedTextConfiguration, self).__init__(**kwargs) + self.column_separator = column_separator + self.field_quote = field_quote + self.record_separator = record_separator + self.escape_char = escape_char + self.headers_present = headers_present + + +class DirectoryHttpHeaders(Model): + """Additional parameters for a set of operations, such as: Directory_create, + Directory_rename, Blob_rename. + + :param cache_control: Cache control for given resource + :type cache_control: str + :param content_type: Content type for given resource + :type content_type: str + :param content_encoding: Content encoding for given resource + :type content_encoding: str + :param content_language: Content language for given resource + :type content_language: str + :param content_disposition: Content disposition for given resource + :type content_disposition: str + """ + + _attribute_map = { + 'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}}, + 'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}}, + 'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}}, + 'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}}, + 'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}}, + } + _xml_map = { + } + + def __init__(self, *, cache_control: str=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_disposition: str=None, **kwargs) -> None: + super(DirectoryHttpHeaders, self).__init__(**kwargs) + self.cache_control = cache_control + self.content_type = content_type + self.content_encoding = content_encoding + self.content_language = content_language + self.content_disposition = content_disposition + + +class FilterBlobItem(Model): + """Blob info from a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param container_name: Required. + :type container_name: str + :param tag_value: Required. 
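+
+# A minimal sketch of building the CpkInfo model defined above for a
+# customer-provided key: the key bytes are hypothetical, and the SHA-256
+# hash is derived from them as the docstring requires. Both values are sent
+# base64-encoded.
+def _example_cpk_info():
+    import base64
+    import hashlib
+    import os
+    key = os.urandom(32)  # hypothetical 256-bit key held by the caller
+    return CpkInfo(
+        encryption_key=base64.b64encode(key).decode('ascii'),
+        encryption_key_sha256=base64.b64encode(hashlib.sha256(key).digest()).decode('ascii'),
+        encryption_algorithm='AES256')
+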
+ :type tag_value: str + """ + + _validation = { + 'name': {'required': True}, + 'container_name': {'required': True}, + 'tag_value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}}, + 'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__(self, *, name: str, container_name: str, tag_value: str, **kwargs) -> None: + super(FilterBlobItem, self).__init__(**kwargs) + self.name = name + self.container_name = container_name + self.tag_value = tag_value + + +class FilterBlobSegment(Model): + """The result of a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param where: Required. + :type where: str + :param blobs: Required. + :type blobs: list[~azure.storage.blob.models.FilterBlobItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'where': {'required': True}, + 'blobs': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}}, + 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, *, service_endpoint: str, where: str, blobs, next_marker: str=None, **kwargs) -> None: + super(FilterBlobSegment, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.where = where + self.blobs = blobs + self.next_marker = next_marker + + +class GeoReplication(Model): + """Geo-Replication information for the Secondary Storage Service. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the secondary location. Possible + values include: 'live', 'bootstrap', 'unavailable' + :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType + :param last_sync_time: Required. A GMT date/time value, to the second. All + primary writes preceding this value are guaranteed to be available for + read operations at the secondary. Primary writes after this point in time + may or may not be available for reads. + :type last_sync_time: datetime + """ + + _validation = { + 'status': {'required': True}, + 'last_sync_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, + 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, + } + _xml_map = { + } + + def __init__(self, *, status, last_sync_time, **kwargs) -> None: + super(GeoReplication, self).__init__(**kwargs) + self.status = status + self.last_sync_time = last_sync_time + + +class JsonTextConfiguration(Model): + """json text configuration. + + All required parameters must be populated in order to send to Azure. + + :param record_separator: Required. 
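+
+# A minimal sketch of consuming the FilterBlobSegment model above: each
+# FilterBlobItem names a container/blob pair plus the matched tag value, and
+# a non-empty next_marker can be passed back as the marker for the next page.
+# The 'segment' argument is assumed to come from a Filter Blobs response.
+def _example_walk_filter_blobs(segment):
+    for item in segment.blobs:
+        print(item.container_name, item.name, item.tag_value)
+    return segment.next_marker or None
+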
record separator + :type record_separator: str + """ + + _validation = { + 'record_separator': {'required': True}, + } + + _attribute_map = { + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + } + _xml_map = { + 'name': 'JsonTextConfiguration' + } + + def __init__(self, *, record_separator: str, **kwargs) -> None: + super(JsonTextConfiguration, self).__init__(**kwargs) + self.record_separator = record_separator + + +class KeyInfo(Model): + """Key information. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The date-time the key is active in ISO 8601 UTC + time + :type start: str + :param expiry: Required. The date-time the key expires in ISO 8601 UTC + time + :type expiry: str + """ + + _validation = { + 'start': {'required': True}, + 'expiry': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, + 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, + } + _xml_map = { + } + + def __init__(self, *, start: str, expiry: str, **kwargs) -> None: + super(KeyInfo, self).__init__(**kwargs) + self.start = start + self.expiry = expiry + + +class LeaseAccessConditions(Model): + """Additional parameters for a set of operations. + + :param lease_id: If specified, the operation only succeeds if the + resource's lease is active and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, + } + _xml_map = { + } + + def __init__(self, *, lease_id: str=None, **kwargs) -> None: + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = lease_id + + +class ListBlobsFlatSegmentResponse(Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. 
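+
+# A minimal sketch of the KeyInfo model above, with start/expiry rendered as
+# ISO 8601 UTC strings as its docstring requires; the one-hour validity
+# window is an arbitrary illustration.
+def _example_key_info():
+    import datetime
+    fmt = '%Y-%m-%dT%H:%M:%SZ'
+    now = datetime.datetime.utcnow()
+    expiry = now + datetime.timedelta(hours=1)
+    return KeyInfo(start=now.strftime(fmt), expiry=expiry.strftime(fmt))
+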
+ :type segment: ~azure.storage.blob.models.BlobFlatListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None: + super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.container_name = container_name + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.segment = segment + self.next_marker = next_marker + + +class ListBlobsHierarchySegmentResponse(Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param delimiter: + :type delimiter: str + :param segment: Required. 
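+
+# A minimal sketch of paging through ListBlobsFlatSegmentResponse values: an
+# empty next_marker ends the enumeration, otherwise it is passed back as the
+# next request's marker. 'fetch_page' is a hypothetical callable, and
+# 'blob_items' is assumed to be the item list on BlobFlatListSegment
+# (defined earlier in this module).
+def _example_walk_flat_listing(fetch_page):
+    marker = None
+    while True:
+        page = fetch_page(marker)  # returns a ListBlobsFlatSegmentResponse
+        for blob in page.segment.blob_items:
+            print(blob.name)
+        if not page.next_marker:
+            break
+        marker = page.next_marker
+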
+ :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}}, + 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, delimiter: str=None, next_marker: str=None, **kwargs) -> None: + super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.container_name = container_name + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.delimiter = delimiter + self.segment = segment + self.next_marker = next_marker + + +class ListContainersSegmentResponse(Model): + """An enumeration of containers. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param container_items: Required. + :type container_items: list[~azure.storage.blob.models.ContainerItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_items': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, *, service_endpoint: str, container_items, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None: + super(ListContainersSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.container_items = container_items + self.next_marker = next_marker + + +class Logging(Model): + """Azure Analytics Logging settings. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param delete: Required. 
Indicates whether all delete requests should be + logged. + :type delete: bool + :param read: Required. Indicates whether all read requests should be + logged. + :type read: bool + :param write: Required. Indicates whether all write requests should be + logged. + :type write: bool + :param retention_policy: Required. + :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'delete': {'required': True}, + 'read': {'required': True}, + 'write': {'required': True}, + 'retention_policy': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, + 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, + 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, *, version: str, delete: bool, read: bool, write: bool, retention_policy, **kwargs) -> None: + super(Logging, self).__init__(**kwargs) + self.version = version + self.delete = delete + self.read = read + self.write = write + self.retention_policy = retention_policy + + +class Metrics(Model): + """a summary of request statistics grouped by API in hour or minute aggregates + for blobs. + + All required parameters must be populated in order to send to Azure. + + :param version: The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the + Blob service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary + statistics for called API operations. + :type include_apis: bool + :param retention_policy: + :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, *, enabled: bool, version: str=None, include_apis: bool=None, retention_policy=None, **kwargs) -> None: + super(Metrics, self).__init__(**kwargs) + self.version = version + self.enabled = enabled + self.include_apis = include_apis + self.retention_policy = retention_policy + + +class ModifiedAccessConditions(Model): + """Additional parameters for a set of operations. + + :param if_modified_since: Specify this header value to operate only on a + blob if it has been modified since the specified date/time. + :type if_modified_since: datetime + :param if_unmodified_since: Specify this header value to operate only on a + blob if it has not been modified since the specified date/time. + :type if_unmodified_since: datetime + :param if_match: Specify an ETag value to operate only on blobs with a + matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs + without a matching value. + :type if_none_match: str + :param if_tags: Specify a SQL where clause on blob tags to operate only on + blobs with a matching value. 
+ :type if_tags: str + """ + + _attribute_map = { + 'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}}, + 'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}}, + 'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}}, + 'if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'if_tags'}}, + } + _xml_map = { + } + + def __init__(self, *, if_modified_since=None, if_unmodified_since=None, if_match: str=None, if_none_match: str=None, if_tags: str=None, **kwargs) -> None: + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since + self.if_match = if_match + self.if_none_match = if_none_match + self.if_tags = if_tags + + +class PageList(Model): + """the list of pages. + + :param page_range: + :type page_range: list[~azure.storage.blob.models.PageRange] + :param clear_range: + :type clear_range: list[~azure.storage.blob.models.ClearRange] + """ + + _attribute_map = { + 'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}}, + 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}}, + } + _xml_map = { + } + + def __init__(self, *, page_range=None, clear_range=None, **kwargs) -> None: + super(PageList, self).__init__(**kwargs) + self.page_range = page_range + self.clear_range = clear_range + + +class PageRange(Model): + """PageRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'PageRange' + } + + def __init__(self, *, start: int, end: int, **kwargs) -> None: + super(PageRange, self).__init__(**kwargs) + self.start = start + self.end = end + + +class QueryFormat(Model): + """QueryFormat. 
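+
+# A minimal sketch of the PageList model above: one hypothetical 1 KiB valid
+# page range followed by a 512-byte cleared range. Start/end offsets are
+# inclusive, so each range spans end - start + 1 bytes.
+def _example_page_list():
+    ranges = PageList(
+        page_range=[PageRange(start=0, end=1023)],
+        clear_range=[ClearRange(start=1024, end=1535)])
+    valid_bytes = sum(r.end - r.start + 1 for r in ranges.page_range)
+    return ranges, valid_bytes  # valid_bytes == 1024
+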
+ + :param type: Possible values include: 'delimited', 'json', 'arrow' + :type type: str or ~azure.storage.blob.models.QueryFormatType + :param delimited_text_configuration: + :type delimited_text_configuration: + ~azure.storage.blob.models.DelimitedTextConfiguration + :param json_text_configuration: + :type json_text_configuration: + ~azure.storage.blob.models.JsonTextConfiguration + :param arrow_configuration: + :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration + """ + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'QueryFormatType', 'xml': {'name': 'Type'}}, + 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}}, + 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}}, + 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration', 'xml': {'name': 'ArrowConfiguration'}}, + } + _xml_map = { + } + + def __init__(self, *, type=None, delimited_text_configuration=None, json_text_configuration=None, arrow_configuration=None, **kwargs) -> None: + super(QueryFormat, self).__init__(**kwargs) + self.type = type + self.delimited_text_configuration = delimited_text_configuration + self.json_text_configuration = json_text_configuration + self.arrow_configuration = arrow_configuration + + +class QueryRequest(Model): + """the quick query body. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar query_type: Required. the query type. Default value: "SQL" . + :vartype query_type: str + :param expression: Required. a query statement + :type expression: str + :param input_serialization: + :type input_serialization: ~azure.storage.blob.models.QuerySerialization + :param output_serialization: + :type output_serialization: ~azure.storage.blob.models.QuerySerialization + """ + + _validation = { + 'query_type': {'required': True, 'constant': True}, + 'expression': {'required': True}, + } + + _attribute_map = { + 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, + 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, + 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'InputSerialization'}}, + 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'OutputSerialization'}}, + } + _xml_map = { + 'name': 'QueryRequest' + } + + query_type = "SQL" + + def __init__(self, *, expression: str, input_serialization=None, output_serialization=None, **kwargs) -> None: + super(QueryRequest, self).__init__(**kwargs) + self.expression = expression + self.input_serialization = input_serialization + self.output_serialization = output_serialization + + +class QuerySerialization(Model): + """QuerySerialization. + + All required parameters must be populated in order to send to Azure. + + :param format: Required. 
+    :type format: ~azure.storage.blob.models.QueryFormat
+    """
+
+    _validation = {
+        'format': {'required': True},
+    }
+
+    _attribute_map = {
+        'format': {'key': 'Format', 'type': 'QueryFormat', 'xml': {'name': 'Format'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, format, **kwargs) -> None:
+        super(QuerySerialization, self).__init__(**kwargs)
+        self.format = format
+
+
+class RetentionPolicy(Model):
+    """the retention policy which determines how long the associated data should
+    persist.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param enabled: Required. Indicates whether a retention policy is enabled
+     for the storage service
+    :type enabled: bool
+    :param days: Indicates the number of days that metrics or logging or
+     soft-deleted data should be retained. All data older than this value will
+     be deleted
+    :type days: int
+    """
+
+    _validation = {
+        'enabled': {'required': True},
+        'days': {'minimum': 1},
+    }
+
+    _attribute_map = {
+        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+        'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None:
+        super(RetentionPolicy, self).__init__(**kwargs)
+        self.enabled = enabled
+        self.days = days
+
+
+class SequenceNumberAccessConditions(Model):
+    """Additional parameters for a set of operations, such as:
+    PageBlob_upload_pages, PageBlob_clear_pages,
+    PageBlob_upload_pages_from_url.
+
+    :param if_sequence_number_less_than_or_equal_to: Specify this header value
+     to operate only on a blob if it has a sequence number less than or equal
+     to the specified value.
+    :type if_sequence_number_less_than_or_equal_to: long
+    :param if_sequence_number_less_than: Specify this header value to operate
+     only on a blob if it has a sequence number less than the specified value.
+    :type if_sequence_number_less_than: long
+    :param if_sequence_number_equal_to: Specify this header value to operate
+     only on a blob if it has the specified sequence number.
+    :type if_sequence_number_equal_to: long
+    """
+
+    _attribute_map = {
+        'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}},
+        'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}},
+        'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, if_sequence_number_less_than_or_equal_to: int=None, if_sequence_number_less_than: int=None, if_sequence_number_equal_to: int=None, **kwargs) -> None:
+        super(SequenceNumberAccessConditions, self).__init__(**kwargs)
+        self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to
+        self.if_sequence_number_less_than = if_sequence_number_less_than
+        self.if_sequence_number_equal_to = if_sequence_number_equal_to
+
+
+class SignedIdentifier(Model):
+    """signed identifier.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required.
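+
+# A minimal sketch combining the quick-query models above: a CSV input
+# description and a JSON output description, each wrapped in a
+# QuerySerialization. The SQL expression is illustrative.
+def _example_query_request():
+    input_format = QueryFormat(
+        type='delimited',
+        delimited_text_configuration=DelimitedTextConfiguration(
+            column_separator=',',
+            field_quote='"',
+            record_separator='\n',
+            escape_char='',
+            headers_present=True))
+    output_format = QueryFormat(
+        type='json',
+        json_text_configuration=JsonTextConfiguration(record_separator='\n'))
+    return QueryRequest(
+        expression='SELECT * FROM BlobStorage',
+        input_serialization=QuerySerialization(format=input_format),
+        output_serialization=QuerySerialization(format=output_format))
+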
a unique id + :type id: str + :param access_policy: + :type access_policy: ~azure.storage.blob.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, + } + _xml_map = { + 'name': 'SignedIdentifier' + } + + def __init__(self, *, id: str, access_policy=None, **kwargs) -> None: + super(SignedIdentifier, self).__init__(**kwargs) + self.id = id + self.access_policy = access_policy + + +class SourceModifiedAccessConditions(Model): + """Additional parameters for a set of operations. + + :param source_if_modified_since: Specify this header value to operate only + on a blob if it has been modified since the specified date/time. + :type source_if_modified_since: datetime + :param source_if_unmodified_since: Specify this header value to operate + only on a blob if it has not been modified since the specified date/time. + :type source_if_unmodified_since: datetime + :param source_if_match: Specify an ETag value to operate only on blobs + with a matching value. + :type source_if_match: str + :param source_if_none_match: Specify an ETag value to operate only on + blobs without a matching value. + :type source_if_none_match: str + :param source_if_tags: Specify a SQL where clause on blob tags to operate + only on blobs with a matching value. + :type source_if_tags: str + """ + + _attribute_map = { + 'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}}, + 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}}, + 'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}}, + 'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}}, + 'source_if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_tags'}}, + } + _xml_map = { + } + + def __init__(self, *, source_if_modified_since=None, source_if_unmodified_since=None, source_if_match: str=None, source_if_none_match: str=None, source_if_tags: str=None, **kwargs) -> None: + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_modified_since = source_if_modified_since + self.source_if_unmodified_since = source_if_unmodified_since + self.source_if_match = source_if_match + self.source_if_none_match = source_if_none_match + self.source_if_tags = source_if_tags + + +class StaticWebsite(Model): + """The properties that enable an account to host a static website. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. 
Indicates whether this account is hosting a
+     static website
+    :type enabled: bool
+    :param index_document: The default name of the index page under each
+     directory
+    :type index_document: str
+    :param error_document404_path: The absolute path of the custom 404 page
+    :type error_document404_path: str
+    :param default_index_document_path: Absolute path of the default index
+     page
+    :type default_index_document_path: str
+    """
+
+    _validation = {
+        'enabled': {'required': True},
+    }
+
+    _attribute_map = {
+        'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+        'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}},
+        'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}},
+        'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, enabled: bool, index_document: str=None, error_document404_path: str=None, default_index_document_path: str=None, **kwargs) -> None:
+        super(StaticWebsite, self).__init__(**kwargs)
+        self.enabled = enabled
+        self.index_document = index_document
+        self.error_document404_path = error_document404_path
+        self.default_index_document_path = default_index_document_path
+
+
+class StorageError(Model):
+    """StorageError.
+
+    :param message:
+    :type message: str
+    """
+
+    _attribute_map = {
+        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+    }
+    _xml_map = {
+    }
+
+    def __init__(self, *, message: str=None, **kwargs) -> None:
+        super(StorageError, self).__init__(**kwargs)
+        self.message = message
+
+
+class StorageErrorException(HttpResponseError):
+    """Server responded with an exception of type 'StorageError'.
+
+    :param deserialize: A deserializer
+    :param response: Server response to be deserialized.
+    """
+
+    def __init__(self, response, deserialize, *args):
+
+        model_name = 'StorageError'
+        self.error = deserialize(model_name, response)
+        if self.error is None:
+            self.error = deserialize.dependencies[model_name]()
+        super(StorageErrorException, self).__init__(response=response)
+
+
+class StorageServiceProperties(Model):
+    """Storage Service Properties.
+
+    :param logging:
+    :type logging: ~azure.storage.blob.models.Logging
+    :param hour_metrics:
+    :type hour_metrics: ~azure.storage.blob.models.Metrics
+    :param minute_metrics:
+    :type minute_metrics: ~azure.storage.blob.models.Metrics
+    :param cors: The set of CORS rules.
+    :type cors: list[~azure.storage.blob.models.CorsRule]
+    :param default_service_version: The default version to use for requests to
+     the Blob service if an incoming request's version is not specified.
+ Possible values include version 2008-10-27 and all more recent versions + :type default_service_version: str + :param delete_retention_policy: + :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy + :param static_website: + :type static_website: ~azure.storage.blob.models.StaticWebsite + """ + + _attribute_map = { + 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, + 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}}, + 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}}, + 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}}, + } + _xml_map = { + } + + def __init__(self, *, logging=None, hour_metrics=None, minute_metrics=None, cors=None, default_service_version: str=None, delete_retention_policy=None, static_website=None, **kwargs) -> None: + super(StorageServiceProperties, self).__init__(**kwargs) + self.logging = logging + self.hour_metrics = hour_metrics + self.minute_metrics = minute_metrics + self.cors = cors + self.default_service_version = default_service_version + self.delete_retention_policy = delete_retention_policy + self.static_website = static_website + + +class StorageServiceStats(Model): + """Stats for the storage service. + + :param geo_replication: + :type geo_replication: ~azure.storage.blob.models.GeoReplication + """ + + _attribute_map = { + 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, + } + _xml_map = { + } + + def __init__(self, *, geo_replication=None, **kwargs) -> None: + super(StorageServiceStats, self).__init__(**kwargs) + self.geo_replication = geo_replication + + +class UserDelegationKey(Model): + """A user delegation key. + + All required parameters must be populated in order to send to Azure. + + :param signed_oid: Required. The Azure Active Directory object ID in GUID + format. + :type signed_oid: str + :param signed_tid: Required. The Azure Active Directory tenant ID in GUID + format + :type signed_tid: str + :param signed_start: Required. The date-time the key is active + :type signed_start: datetime + :param signed_expiry: Required. The date-time the key expires + :type signed_expiry: datetime + :param signed_service: Required. Abbreviation of the Azure Storage service + that accepts the key + :type signed_service: str + :param signed_version: Required. The service version that created the key + :type signed_version: str + :param value: Required. 
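+
+# A minimal sketch composing the service-properties models above: logging and
+# hourly metrics sharing a 7-day retention policy, plus a static website.
+# All values are illustrative.
+def _example_service_properties():
+    retention = RetentionPolicy(enabled=True, days=7)
+    return StorageServiceProperties(
+        logging=Logging(version='1.0', delete=True, read=True, write=True,
+                        retention_policy=retention),
+        hour_metrics=Metrics(enabled=True, version='1.0', include_apis=True,
+                             retention_policy=retention),
+        delete_retention_policy=retention,
+        static_website=StaticWebsite(enabled=True, index_document='index.html',
+                                     error_document404_path='404.html'))
+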
The key as a base64 string + :type value: str + """ + + _validation = { + 'signed_oid': {'required': True}, + 'signed_tid': {'required': True}, + 'signed_start': {'required': True}, + 'signed_expiry': {'required': True}, + 'signed_service': {'required': True}, + 'signed_version': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}}, + 'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}}, + 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}}, + 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}}, + 'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}}, + 'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}}, + 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, + } + _xml_map = { + } + + def __init__(self, *, signed_oid: str, signed_tid: str, signed_start, signed_expiry, signed_service: str, signed_version: str, value: str, **kwargs) -> None: + super(UserDelegationKey, self).__init__(**kwargs) + self.signed_oid = signed_oid + self.signed_tid = signed_tid + self.signed_start = signed_start + self.signed_expiry = signed_expiry + self.signed_service = signed_service + self.signed_version = signed_version + self.value = value diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/__init__.py new file mode 100644 index 0000000..1ea0453 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/__init__.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._container_operations import ContainerOperations +from ._directory_operations import DirectoryOperations +from ._blob_operations import BlobOperations +from ._page_blob_operations import PageBlobOperations +from ._append_blob_operations import AppendBlobOperations +from ._block_blob_operations import BlockBlobOperations + +__all__ = [ + 'ServiceOperations', + 'ContainerOperations', + 'DirectoryOperations', + 'BlobOperations', + 'PageBlobOperations', + 'AppendBlobOperations', + 'BlockBlobOperations', +] diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_append_blob_operations.py new file mode 100644 index 0000000..000810a --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_append_blob_operations.py @@ -0,0 +1,694 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
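A hedged sketch of what a UserDelegationKey is for: the public wrapper clients vendored by this package use it to sign a user-delegation SAS. BlobServiceClient and generate_blob_sas mirror the azure.storage.blob surface; the account, container, and AAD credential below are hypothetical, and azure.identity is assumed only for the example:

    from datetime import datetime, timedelta
    from azure.identity import DefaultAzureCredential
    from azure.multiapi.storagev2.blob.v2020_02_10 import (
        BlobServiceClient, generate_blob_sas)

    service = BlobServiceClient('https://myaccount.blob.core.windows.net',
                                credential=DefaultAzureCredential())
    key = service.get_user_delegation_key(
        key_start_time=datetime.utcnow(),
        key_expiry_time=datetime.utcnow() + timedelta(hours=1))
    # The model's signed_oid, signed_tid, value, etc. feed the SAS signature.
    sas_token = generate_blob_sas('myaccount', 'mycontainer', 'myblob',
                                  user_delegation_key=key, permission='r',
                                  expiry=datetime.utcnow() + timedelta(hours=1))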
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. import models + + +class AppendBlobOperations(object): + """AppendBlobOperations operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.x_ms_blob_type = "AppendBlob" + + def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + """The Create Append Blob operation creates a new append blob. + + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations.
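A hedged usage sketch for this operation; the generated AzureBlobStorage client shape and the URL are assumptions, and in practice the hand-written AppendBlobClient in this package does this wiring (authentication pipeline elided):

    from azure.multiapi.storagev2.blob.v2020_02_10._generated import AzureBlobStorage

    client = AzureBlobStorage(
        url='https://myaccount.blob.core.windows.net/mycontainer/log.txt')
    # An append blob is created empty, so Content-Length must be 0 here;
    # content is added afterwards with append_block.
    client.append_blob.create(content_length=0, timeout=30)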
+ :type blob_tags_string: str + :param blob_http_headers: Additional parameters for the operation + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + blob_content_type = None + if blob_http_headers is not None: + blob_content_type = blob_http_headers.blob_content_type + blob_content_encoding = None + if blob_http_headers is not None: + blob_content_encoding = blob_http_headers.blob_content_encoding + blob_content_language = None + if blob_http_headers is not None: + blob_content_language = blob_http_headers.blob_content_language + blob_content_md5 = None + if blob_http_headers is not None: + blob_content_md5 = blob_http_headers.blob_content_md5 + blob_cache_control = None + if blob_http_headers is not None: + blob_cache_control = blob_http_headers.blob_cache_control + blob_content_disposition = None + if blob_http_headers is not None: + blob_content_disposition = blob_http_headers.blob_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if metadata is not None: + header_parameters['x-ms-meta'] = 
self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') + if blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') + if blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') + if blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') + if blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') + if blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') + if blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{containerName}/{blob}'} + + def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + """The Append Block operation commits a new block of data to the end of an + existing append blob. The Append Block operation is permitted only if + the blob was created with x-ms-blob-type set to AppendBlob. Append + Block is supported only on version 2015-02-21 or later. + + :param body: Initial data + :type body: Generator + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
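Continuing the hedged sketch for this method (same assumed `client` as above; AppendPositionAccessConditions is the generated model named in the signature):

    from azure.multiapi.storagev2.blob.v2020_02_10._generated import models

    data = b'example log line\n'
    client.append_blob.append_block(
        body=iter([data]),       # the body is streamed, hence an iterator
        content_length=len(data),
        append_position_access_conditions=models.AppendPositionAccessConditions(
            append_position=0))  # fail if another writer appended first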
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Additional parameters for + the operation + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + max_size = None + if append_position_access_conditions is not None: + max_size = append_position_access_conditions.max_size + append_position = None + if append_position_access_conditions is not None: + append_position = append_position_access_conditions.append_position + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "appendblock" + + # Construct URL + url = self.append_block.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 
'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') + if append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': 
self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + append_block.metadata = {'url': '/{containerName}/{blob}'} + + def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): + """The Append Block operation commits a new block of data to the end of an + existing append blob where the contents are read from a source URL. The + Append Block operation is permitted only if the blob was created with + x-ms-blob-type set to AppendBlob. Append Block is supported only on + version 2015-02-21 or later. + + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param content_length: The length of the request. + :type content_length: long + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range + of bytes that must be read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
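A hedged sketch for this server-side variant (the source SAS URL is hypothetical; the request itself carries no body, so content_length is 0 and the amount of data is conveyed by source_range):

    source = 'https://src.blob.core.windows.net/src/data.bin?sv=...'  # SAS assumed
    client.append_blob.append_block_from_url(
        source_url=source,
        content_length=0,             # no request body; the service reads the source
        source_range='bytes=0-1023')  # append the first KiB of the source blob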
+ :type request_id: str + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Additional parameters for + the operation + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + max_size = None + if append_position_access_conditions is not None: + max_size = append_position_access_conditions.max_size + append_position = None + if append_position_access_conditions is not None: + append_position = append_position_access_conditions.append_position + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + + comp = "appendblock" + + # Construct URL + url = self.append_block_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = 
self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') + if append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if 
source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} + + def seal(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, append_position_access_conditions=None, cls=None, **kwargs): + """The Seal operation seals the Append Blob to make it read-only. Seal is + supported only on version 2019-12-12 or later. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
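A hedged sketch for this method (same assumed `client` and `models` import as above); guarding on the append position makes the seal fail if a concurrent append slipped in:

    client.append_blob.seal(
        append_position_access_conditions=models.AppendPositionAccessConditions(
            append_position=1024))  # expected final length; illustrative value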
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param append_position_access_conditions: Additional parameters for + the operation + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + append_position = None + if append_position_access_conditions is not None: + append_position = append_position_access_conditions.append_position + + comp = "seal" + + # Construct URL + url = self.seal.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) 
+ raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + seal.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_blob_operations.py new file mode 100644 index 0000000..394a519 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_blob_operations.py @@ -0,0 +1,3065 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. import models + + +class BlobOperations(object): + """BlobOperations operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar x_ms_requires_sync: . Constant value: "true". + :ivar x_ms_copy_action: . Constant value: "abort". + :ivar restype: . Constant value: "account". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.x_ms_requires_sync = "true" + self.x_ms_copy_action = "abort" + self.restype = "account" + + def download(self, snapshot=None, version_id=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): + """The Download operation reads or downloads a blob from the system, + including its metadata and properties. You can also call Download to + read a snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer.
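A hedged sketch of a ranged download through this generated method (client construction as in the earlier sketches; keeping the range at or below 4 MB lets the service return the per-range MD5 requested here):

    from azure.multiapi.storagev2.blob.v2020_02_10._generated import AzureBlobStorage

    client = AzureBlobStorage(
        url='https://myaccount.blob.core.windows.net/mycontainer/data.bin')
    stream = client.blob.download(
        range='bytes=0-4194303',     # first 4 MB; returned with status 206
        range_get_content_md5=True)  # per-range MD5 only for ranges <= 4 MB
    content = b''.join(stream)       # the result is a stream of byte chunks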
+ :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param range: Return only the bytes of the blob in the specified + range. + :type range: str + :param range_get_content_md5: When set to true and specified together + with the Range, the service returns the MD5 hash for the range, as + long as the range is less than or equal to 4 MB in size. + :type range_get_content_md5: bool + :param range_get_content_crc64: When set to true and specified + together with the Range, the service returns the CRC64 hash for the + range, as long as the range is less than or equal to 4 MB in size. + :type range_get_content_crc64: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.download.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 
'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if range_get_content_crc64 is not None: + header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', 
response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', 
response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + download.metadata = {'url': '/{containerName}/{blob}'} + + def get_properties(self, snapshot=None, version_id=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): + """The Get Properties operation returns all user-defined metadata, + standard HTTP properties, and system properties for the blob. 
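Because the next method, get_properties, returns None unless a `cls` callback is supplied, here is a hedged sketch of capturing the deserialized headers; the callback shape follows the cls(response, None, response_headers) call in its body:

    def keep_headers(response, deserialized, headers):
        # cls receives the raw response, the deserialized body (None here),
        # and the dict of deserialized response headers.
        return headers

    props = client.blob.get_properties(cls=keep_headers)
    print(props['ETag'], props['Content-Length'], props['x-ms-blob-type'])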
It does + not return the content of the blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", 
request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')), + 'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': 
self._deserialize('str', response.headers.get('Content-Type')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')), + 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')), + 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')), + 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'x-ms-is-current-version': self._deserialize('bool', response.headers.get('x-ms-is-current-version')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-rehydrate-priority': self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')), + 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{containerName}/{blob}'} + + def delete(self, snapshot=None, version_id=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """If the storage account's soft delete feature is disabled then, when a + blob is deleted, it is permanently removed from the storage account. If + the storage account's soft delete feature is enabled, then, when a blob + is deleted, it is marked for deletion and becomes inaccessible + immediately. However, the blob service retains the blob or snapshot for + the number of days specified by the DeleteRetentionPolicy section of + [Storage service properties] (Set-Blob-Service-Properties.md). 
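Continuing the sketch above, the get_properties operation just defined returns None unless a `cls` callback is supplied; the callback receives the raw response, the (absent) deserialized body, and the header dict built above (same assumed `client` as the earlier example):

    # Sketch: surface selected response headers from the HEAD request.
    def keep_headers(response, deserialized, headers):
        return headers

    headers = client.blob.get_properties(timeout=30, cls=keep_headers)
    print(headers['ETag'], headers['x-ms-blob-type'], headers['Content-Length'])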
After + the specified number of days has passed, the blob's data is permanently + removed from the storage account. Note that you continue to be charged + for the soft-deleted blob's storage until it is permanently removed. + Use the List Blobs API and specify the "include=deleted" query + parameter to discover which blobs and snapshots have been soft deleted. + You can then use the Undelete Blob API to restore a soft-deleted blob. + All other operations on a soft-deleted blob or snapshot cause the + service to return an HTTP status code of 404 (ResourceNotFound). + + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param delete_snapshots: Required if the blob has associated + snapshots. Specify one of the following two options: include: Delete + the base blob and all of its snapshots. only: Delete only the blob's + snapshots and not the blob itself. Possible values include: 'include', + 'only' + :type delete_snapshots: str or + ~azure.storage.blob.models.DeleteSnapshotsOptionType + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid']
= self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{containerName}/{blob}'} + + def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Set the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical + Namespace is enabled for the account. Sets POSIX access permissions + for the file owner, the file owning group, and others. Each class may + be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and + directories. 
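For the delete operation that closes above, the lease and conditional arguments are grouped into msrest model objects rather than passed as loose keywords; a hedged sketch (models import path and lease ID assumed, `client` as in the earlier examples):

    # Sketch: soft-delete the blob and all of its snapshots under a lease.
    from azure.multiapi.storagev2.blob.v2020_02_10._generated import models

    lease = models.LeaseAccessConditions(lease_id="00000000-1111-2222-3333-444444444444")  # assumed active lease
    client.blob.delete(
        delete_snapshots="include",
        lease_access_conditions=lease,
    )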
The value is a comma-separated list of access control + entries. Each access control entry (ACE) consists of a scope, a type, + a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "setAccessControl" + + # Construct URL + url = self.set_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + 
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + } + return cls(response, None, response_headers) + set_access_control.metadata = {'url': '/{filesystem}/{path}'} + + def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Get the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the identity values returned in + the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
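The set_access_control operation that closes above maps directly onto the x-ms-owner, x-ms-group, x-ms-permissions and x-ms-acl request headers; a sketch under the same assumptions (HNS-enabled account assumed):

    # Sketch: set symbolic POSIX permissions; posix_permissions and posix_acl
    # are alternatives, so only one is supplied here.
    client.blob.set_access_control(posix_permissions="rwxr-x---")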
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "getAccessControl" + + # Construct URL + url = self.get_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-owner': 
self._deserialize('str', response.headers.get('x-ms-owner')), + 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), + 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), + 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + } + return cls(response, None, response_headers) + get_access_control.metadata = {'url': '/{filesystem}/{path}'} + + def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): + """Rename a blob/file. By default, the destination is overwritten and, if + the destination already exists and has a lease, the lease is broken. + This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + To fail if the destination already exists, use a conditional request + with If-None-Match: "*". + + :param rename_source: The file or directory to be renamed. The value + must have the following format: "/{filesystem}/{path}". If + "x-ms-properties" is specified, the properties will overwrite the + existing properties; otherwise, the existing properties will be + preserved. + :type rename_source: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param path_rename_mode: Determines the behavior of the rename + operation. Possible values include: 'legacy', 'posix' + :type path_rename_mode: str or + ~azure.storage.blob.models.PathRenameMode + :param directory_properties: Optional. User-defined properties to be + stored with the file or directory, in the format of a comma-separated + list of name and value pairs "n1=v1, n2=v2, ...", where each value is + base64 encoded. + :type directory_properties: str + :param posix_permissions: Optional and only valid if Hierarchical + Namespace is enabled for the account. Sets POSIX access permissions + for the file owner, the file owning group, and others. Each class may + be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + 0766) are supported. + :type posix_permissions: str + :param posix_umask: Only valid if Hierarchical Namespace is enabled + for the account. This umask restricts permission settings for the file + and directory, and is applied only when a default ACL does not exist + in the parent directory. If the umask bit is set, the corresponding + permission is disabled; otherwise the corresponding permission is + determined by the posix_permissions value. A 4-digit octal notation + (e.g. 0022) is supported here. If no umask is specified, a default + umask of 0027 is used. + :type posix_umask: str + :param source_lease_id: A lease ID for the source path.
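Reading the ACL back goes through the same `cls` hook, since the get_access_control operation that closes above otherwise returns None; a sketch under the same assumptions:

    # Sketch: fetch the ACL with AAD object IDs resolved to principal names.
    acl = client.blob.get_access_control(
        upn=True,
        cls=lambda response, deserialized, headers: headers['x-ms-acl'],
    )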
If specified, + the source path must have an active lease and the lease ID must match. + :type source_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param directory_http_headers: Additional parameters for the operation + :type directory_http_headers: + ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + cache_control = None + if directory_http_headers is not None: + cache_control = directory_http_headers.cache_control + content_type = None + if directory_http_headers is not None: + content_type = directory_http_headers.content_type + content_encoding = None + if directory_http_headers is not None: + content_encoding = directory_http_headers.content_encoding + content_language = None + if directory_http_headers is not None: + content_language = directory_http_headers.content_language + content_disposition = None + if directory_http_headers is not None: + content_disposition = directory_http_headers.content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + + # Construct URL + url = self.rename.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", 
timeout, 'int', minimum=0) + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + } + return cls(response, None, response_headers) + rename.metadata = {'url': '/{filesystem}/{path}'} + + def undelete(self, timeout=None, request_id=None, cls=None, **kwargs): + """Undelete a blob that was previously soft deleted. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "undelete" + + # Construct URL + url = self.undelete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + undelete.metadata = {'url': '/{containerName}/{blob}'} + + def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, cls=None, **kwargs): + """Sets the time a 
blob will expire and be deleted. + + :param expiry_options: Required. Indicates the mode of the expiry time. + Possible values include: 'NeverExpire', 'RelativeToCreation', + 'RelativeToNow', 'Absolute' + :type expiry_options: str or + ~azure.storage.blob.models.BlobExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param expires_on: The time at which the blob should expire + :type expires_on: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "expiry" + + # Construct URL + url = self.set_expiry.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') + if expires_on is not None: + header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_expiry.metadata = {'url': '/{containerName}/{blob}'} + + def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """The Set HTTP Headers operation sets system properties on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations.
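For the set_expiry operation that closes above, note that although the docstring types expires_on as str, the header is serialized with the 'rfc-1123' formatter, which expects a datetime; a sketch under the same assumptions:

    # Sketch: expire the blob at an absolute time one hour from now.
    import datetime

    expires = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    client.blob.set_expiry("Absolute", expires_on=expires)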
+ :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param blob_http_headers: Additional parameters for the operation + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + blob_cache_control = None + if blob_http_headers is not None: + blob_cache_control = blob_http_headers.blob_cache_control + blob_content_type = None + if blob_http_headers is not None: + blob_content_type = blob_http_headers.blob_content_type + blob_content_md5 = None + if blob_http_headers is not None: + blob_content_md5 = blob_http_headers.blob_content_md5 + blob_content_encoding = None + if blob_http_headers is not None: + blob_content_encoding = blob_http_headers.blob_content_encoding + blob_content_language = None + if blob_http_headers is not None: + blob_content_language = blob_http_headers.blob_content_language + blob_content_disposition = None + if blob_http_headers is not None: + blob_content_disposition = blob_http_headers.blob_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "properties" + + # Construct URL + url = self.set_http_headers.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') + if blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", 
blob_content_type, 'str') + if blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') + if blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') + if blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') + if blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_http_headers.metadata = {'url': '/{containerName}/{blob}'} + + def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + """The Set Blob Metadata operation sets user-defined metadata for the + specified blob as one or more name-value pairs. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. 
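The set_http_headers operation that closes above takes its values from a BlobHTTPHeaders model rather than loose keyword arguments (the model name follows the attribute accesses above; the import path is assumed):

    # Sketch: overwrite the blob's standard HTTP headers in one call.
    from azure.multiapi.storagev2.blob.v2020_02_10._generated import models

    headers = models.BlobHTTPHeaders(
        blob_content_type="application/json",
        blob_cache_control="max-age=3600",
    )
    client.blob.set_http_headers(blob_http_headers=headers)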
If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + 
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{containerName}/{blob}'} + + def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): + """[Update] The Lease Blob operation establishes and manages a lock on a + blob for write and delete operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or + negative one (-1) for a lease that never expires. 
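At this generated layer the set_metadata operation that closes above passes `metadata` through as a single x-ms-meta header value; the public wrapper is what expands a dict into the per-key x-ms-meta-* headers the service expects, so a sketch is more natural one level up (BlobClient re-export at the version root is assumed):

    # Sketch: set user-defined metadata via the public wrapper.
    from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient

    blob = BlobClient.from_blob_url("https://myaccount.blob.core.windows.net/mycontainer/myblob?sv=...")
    blob.set_blob_metadata(metadata={"category": "test"})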
A non-infinite lease + can be between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "lease" + action = "acquire" + + # Construct URL + url = self.acquire_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # 
Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + acquire_lease.metadata = {'url': '/{containerName}/{blob}'} + + def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): + """[Update] The Lease Blob operation establishes and manages a lock on a + blob for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
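+
+        A minimal usage sketch (editorial illustration, not part of the
+        generated client; ``ops`` stands for an already-constructed instance
+        of this operations class)::
+
+            import uuid
+
+            captured = {}
+            ops.acquire_lease(
+                duration=15,  # 15-60 seconds, or -1 for an infinite lease
+                proposed_lease_id=str(uuid.uuid4()),
+                cls=lambda resp, body, headers: captured.update(headers))
+            ops.release_lease(captured['x-ms-lease-id'])
+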
+ :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "lease" + action = "release" + + # Construct URL + url = self.release_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', 
response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + release_lease.metadata = {'url': '/{containerName}/{blob}'} + + def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): + """[Update] The Lease Blob operation establishes and manages a lock on a + blob for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "lease" + action = "renew" + + # Construct URL + url = self.renew_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", 
if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + renew_lease.metadata = {'url': '/{containerName}/{blob}'} + + def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): + """[Update] The Lease Blob operation establishes and manages a lock on a + blob for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
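+
+        A usage sketch (editorial illustration; ``ops`` and ``current_id``
+        are assumed to come from an earlier ``acquire_lease`` call)::
+
+            import uuid
+
+            # Swap the active lease ID for a new client-chosen GUID;
+            # renew_lease(current_id) follows the same calling shape.
+            ops.change_lease(current_id, proposed_lease_id=str(uuid.uuid4()))
+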
+ :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "lease" + action = "change" + + # Construct URL + url = self.change_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + change_lease.metadata = {'url': '/{containerName}/{blob}'} + + def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): + """[Update] The Lease Blob operation establishes and manages a lock on a + blob for write and delete operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param break_period: For a break operation, proposed duration the + lease should continue before it is broken, in seconds, between 0 and + 60. This break period is only used if it is shorter than the time + remaining on the lease. If longer, the time remaining on the lease is + used. A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break period. + If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, + and an infinite lease breaks immediately. + :type break_period: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
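+
+        A usage sketch (editorial illustration; ``ops`` is an assumed
+        instance of this operations class). Breaking requires no lease ID::
+
+            # Let the current lease run at most 10 more seconds, then
+            # free the blob for a new lease.
+            ops.break_lease(break_period=10)
+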
+ :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "lease" + action = "break" + + # Construct URL + url = self.break_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), + 'x-ms-client-request-id': 
self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + break_lease.metadata = {'url': '/{containerName}/{blob}'} + + def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): + """The Create Snapshot operation creates a read-only snapshot of a blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags 
= None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "snapshot" + + # Construct URL + url = self.create_snapshot.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 
'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    create_snapshot.metadata = {'url': '/{containerName}/{blob}'}
+
+    def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs):
+        """The Start Copy From URL operation copies a blob or an internet
+        resource to a new blob.
+
+        :param copy_source: A URL of up to 2 KB in length that identifies the
+         source blob or internet resource. The value should be URL-encoded as
+         it would appear in a request URI. The source must either be public
+         or must be authenticated via a shared access signature.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair
+         associated with the blob. If no name-value pairs are specified, the
+         operation will copy the metadata from the source blob or file to the
+         destination blob. If one or more name-value pairs are specified, the
+         destination blob is created with the specified metadata, and metadata
+         is not copied from the source blob or file. Note that beginning with
+         version 2009-09-19, metadata names must adhere to the naming rules
+         for C# identifiers. See Naming and Referencing Containers, Blobs, and
+         Metadata for more information.
+        :type metadata: str
+        :param tier: Optional. Indicates the tier to be set on the blob.
+         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+         'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param rehydrate_priority: Optional: Indicates the priority with which
+         to rehydrate an archived blob. Possible values include: 'High',
+         'Standard'
+        :type rehydrate_priority: str or
+         ~azure.storage.blob.models.RehydratePriority
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when
+         storage analytics logging is enabled.
+        :type request_id: str
+        :param blob_tags_string: Optional. Used to set blob tags in various
+         blob operations.
+        :type blob_tags_string: str
+        :param seal_blob: Overrides the sealed state of the destination blob.
+         Service version 2019-12-12 and newer.
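+
+        A usage sketch (editorial illustration; ``ops`` and the source URL
+        are assumptions). The copy itself is asynchronous::
+
+            captured = {}
+            ops.start_copy_from_url(
+                "https://myaccount.blob.core.windows.net/src/blob?<sas>",
+                cls=lambda resp, body, headers: captured.update(headers))
+            copy_id = captured['x-ms-copy-id']
+            # Poll the destination blob until x-ms-copy-status is 'success';
+            # copy_id can be passed to abort_copy_from_url to cancel.
+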
+ :type seal_blob: bool + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + source_if_tags = None + if source_modified_access_conditions is not None: + source_if_tags = source_modified_access_conditions.source_if_tags + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.start_copy_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if 
blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if seal_blob is not None: + header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + if source_if_tags is not None: + header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", source_if_tags, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} + + def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, modified_access_conditions=None, 
lease_access_conditions=None, cls=None, **kwargs):
+        """The Copy From URL operation copies a blob or an internet resource
+        to a new blob. It will not return a response until the copy is
+        complete.
+
+        :param copy_source: A URL of up to 2 KB in length that identifies the
+         source blob or internet resource. The value should be URL-encoded as
+         it would appear in a request URI. The source must either be public
+         or must be authenticated via a shared access signature.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair
+         associated with the blob. If no name-value pairs are specified, the
+         operation will copy the metadata from the source blob or file to the
+         destination blob. If one or more name-value pairs are specified, the
+         destination blob is created with the specified metadata, and metadata
+         is not copied from the source blob or file. Note that beginning with
+         version 2009-09-19, metadata names must adhere to the naming rules
+         for C# identifiers. See Naming and Referencing Containers, Blobs, and
+         Metadata for more information.
+        :type metadata: str
+        :param tier: Optional. Indicates the tier to be set on the blob.
+         Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+         'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when
+         storage analytics logging is enabled.
+        :type request_id: str
+        :param source_content_md5: Specify the md5 calculated for the range of
+         bytes that must be read from the copy source.
+        :type source_content_md5: bytearray
+        :param blob_tags_string: Optional. Used to set blob tags in various
+         blob operations.
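+
+        A usage sketch (editorial illustration; ``ops`` and the source URL
+        are assumptions). Unlike Start Copy From URL, this call blocks until
+        the copy has completed::
+
+            ops.copy_from_url(
+                "https://myaccount.blob.core.windows.net/src/blob?<sas>",
+                timeout=30)
+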
+ :type blob_tags_string: str + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.copy_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + 
header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + copy_from_url.metadata = {'url': '/{containerName}/{blob}'} + + def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): + """The Abort Copy From URL operation aborts a pending Copy From URL + operation, and leaves a destination blob with zero length and full + metadata. 
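+
+        A usage sketch (editorial illustration; ``copy_id`` is assumed to be
+        the identifier captured from an earlier ``start_copy_from_url``
+        call)::
+
+            # Cancel the pending copy; the destination blob is left with
+            # zero length and full metadata.
+            ops.abort_copy_from_url(copy_id)
+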
+ + :param copy_id: The copy identifier provided in the x-ms-copy-id + header of the original Copy Blob operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "copy" + + # Construct URL + url = self.abort_copy_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} + + def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """The Set Tier operation sets the tier on a blob. 
The operation is + allowed on a page blob in a premium storage account and on a block blob + in a blob storage account (locally redundant storage only). A premium + page blob's tier determines the allowed size, IOPS, and bandwidth of + the blob. A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param tier: Indicates the tier to be set on the blob. Possible values + include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60', + 'P70', 'P80', 'Hot', 'Cool', 'Archive' + :type tier: str or ~azure.storage.blob.models.AccessTierRequired + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param rehydrate_priority: Optional: Indicates the priority with which + to rehydrate an archived blob. Possible values include: 'High', + 'Standard' + :type rehydrate_priority: str or + ~azure.storage.blob.models.RehydratePriority + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "tier" + + # Construct URL + url = self.set_tier.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + header_parameters['x-ms-version'] = 
self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+        if lease_id is not None:
+            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+        if if_tags is not None:
+            header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_tier.metadata = {'url': '/{containerName}/{blob}'}
+
+    def get_account_info(self, cls=None, **kwargs):
+        """Returns the sku name and account kind.
+
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "properties"
+
+        # Construct URL
+        url = self.get_account_info.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
+                'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    get_account_info.metadata = {'url': 
'/{containerName}/{blob}'} + + def query(self, query_request=None, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): + """The Query operation enables users to select/project on blob data by + providing simple query expressions. + + :param query_request: the query request + :type query_request: ~azure.storage.blob.models.QueryRequest + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "query" + + # Construct URL + url = self.query.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['Content-Type'] = 
'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + if query_request is not None: + body_content = self._serialize.body(query_request, 'QueryRequest') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', 
response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', 
response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + query.metadata = {'url': '/{containerName}/{blob}'} + + def get_tags(self, timeout=None, request_id=None, snapshot=None, version_id=None, modified_access_conditions=None, cls=None, **kwargs): + """The Get Tags operation enables users to get the tags associated with a + blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. 
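A minimal sketch of driving the query operation defined above through the package's public wrapper. It assumes this module re-exports the same surface as azure-storage-blob (a BlobClient with a query_blob method); the connection string, container, and blob names are placeholders.

import os
from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient

blob = BlobClient.from_connection_string(
    os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # placeholder credential
    container_name="data",
    blob_name="rows.csv",
)

# comp=query POSTs a QueryRequest body and streams back the selected
# rows; the service answers 200 (full result) or 206 (partial).
reader = blob.query_blob("SELECT * FROM BlobStorage")
print(reader.readall())

# The neighbouring operations surface similarly: set_tier maps to
# set_standard_blob_tier, get_account_info to get_account_information.
blob.set_standard_blob_tier("Cool")
print(blob.get_account_information())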
+ :type version_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: BlobTags or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlobTags + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "tags" + + # Construct URL + url = self.get_tags.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('BlobTags', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_tags.metadata = {'url': '/{containerName}/{blob}'} + + def set_tags(self, timeout=None, version_id=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, tags=None, modified_access_conditions=None, cls=None, **kwargs): + """The Set Tags operation enables users to set tags on a blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. 
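For the get_tags operation just closed, a sketch via the wrapper's get_blob_tags (name assumed from the azure-storage-blob surface this package vendors); the connection string and names are placeholders.

import os
from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient

blob = BlobClient.from_connection_string(
    os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # placeholder credential
    container_name="data",
    blob_name="rows.csv",
)

# GET ?comp=tags; the BlobTags XML body is parsed into a plain dict.
tags = blob.get_blob_tags()
for name, value in tags.items():
    print(name, value)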
+ :type version_id: str + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param tags: Blob tags + :type tags: ~azure.storage.blob.models.BlobTags + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "tags" + + # Construct URL + url = self.set_tags.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + if tags is not None: + body_content = self._serialize.body(tags, 'BlobTags') + else: + body_content = None + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', 
response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_tags.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_block_blob_operations.py new file mode 100644 index 0000000..8228c47 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_block_blob_operations.py @@ -0,0 +1,833 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. import models + + +class BlockBlobOperations(object): + """BlockBlobOperations operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.x_ms_blob_type = "BlockBlob" + + def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + """The Upload Block Blob operation updates the content of an existing + block blob. Updating an existing block blob overwrites any existing + metadata on the blob. Partial updates are not supported with Put Blob; + the content of the existing blob is overwritten with the content of the + new blob. To perform a partial update of the content of a block blob, + use the Put Block List operation. + + :param body: Initial data + :type body: Generator + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. 
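A hedged sketch of the set_tags operation just closed, via the wrapper's set_blob_tags, which serializes a dict into the BlobTags XML body (PUT ?comp=tags, expecting 204); credentials and names are placeholders.

import os
from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient

blob = BlobClient.from_connection_string(
    os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # placeholder credential
    container_name="data",
    blob_name="rows.csv",
)

# The dict becomes the <TagSet> body; the service replies 204 on success.
blob.set_blob_tags({"project": "alpha", "status": "final"})
assert blob.get_blob_tags() == {"project": "alpha", "status": "final"}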
Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', + 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str + :param blob_http_headers: Additional parameters for the operation + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + blob_content_type = None + if blob_http_headers is not None: + blob_content_type = blob_http_headers.blob_content_type + blob_content_encoding = None + if blob_http_headers is not None: + blob_content_encoding = blob_http_headers.blob_content_encoding + blob_content_language = None + if blob_http_headers is not None: + blob_content_language = blob_http_headers.blob_content_language + blob_content_md5 = None + if blob_http_headers is not None: + blob_content_md5 = blob_http_headers.blob_content_md5 + blob_cache_control = None + if blob_http_headers is not None: + blob_cache_control = blob_http_headers.blob_cache_control + blob_content_disposition = None + if blob_http_headers is not None: + blob_content_disposition = blob_http_headers.blob_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = 
modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.upload.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') + if blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') + if blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') + if blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') + if blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') + if blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') + if blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload.metadata = {'url': '/{containerName}/{blob}'} + + def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, cls=None, **kwargs): + """The Stage Block operation creates a new block to be committed as part + of a blob. + + :param block_id: A valid Base64 string value that identifies the + block. Prior to encoding, the string must be less than or equal to 64 + bytes in size. For a given blob, the length of the value specified for + the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param body: Initial data + :type body: Generator + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
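A sketch of the upload operation above via the wrapper's upload_blob (names assumed from the azure-storage-blob surface); as the docstring notes, Put Blob replaces content and metadata wholesale rather than patching them.

import os
from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient

blob = BlobClient.from_connection_string(
    os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # placeholder credential
    container_name="data",
    blob_name="hello.txt",
)

# One-shot Put Blob: any existing content and metadata are overwritten.
blob.upload_blob(
    b"hello, world",
    blob_type="BlockBlob",
    overwrite=True,
    metadata={"origin": "sketch"},  # sent as x-ms-meta-* headers
)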
+ :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + + comp = "block" + + # Construct URL + url = self.stage_block.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = 
self._serialize.header("encryption_scope", encryption_scope, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + stage_block.metadata = {'url': '/{containerName}/{blob}'} + + def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): + """The Stage Block operation creates a new block to be committed as part + of a blob where the contents are read from a URL. + + :param block_id: A valid Base64 string value that identifies the + block. Prior to encoding, the string must be less than or equal to 64 + bytes in size. For a given blob, the length of the value specified for + the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range + of bytes that must be read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + + comp = "block" + + # Construct URL + url = self.stage_block_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} + + def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + """The Commit Block List operation writes a blob by specifying the list of + block IDs that make up the blob. 
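The stage_block_from_url variant just defined has the service read the block from another URL instead of the request body. A sketch, assuming a readable (e.g. SAS-authorized) source URL; the account, container, and SAS token are placeholders.

import os
import uuid
from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient

blob = BlobClient.from_connection_string(
    os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # placeholder credential
    container_name="data",
    blob_name="big.bin",
)

# The service fetches the range itself (x-ms-copy-source plus
# x-ms-source-range); no block data flows through this client.
blob.stage_block_from_url(
    block_id=str(uuid.uuid4()),
    source_url="https://<account>.blob.core.windows.net/src/big.bin?<sas>",
    source_offset=0,
    source_length=4 * 1024 * 1024,
)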
In order to be written as part of a + blob, a block must have been successfully written to the server in a + prior Put Block operation. You can call Put Block List to update a blob + by uploading only those blocks that have changed, then committing the + new and existing blocks together. You can do this by specifying whether + to commit a block from the committed block list or from the uncommitted + block list, or to commit the most recently uploaded version of the + block, whichever list it may belong to. + + :param blocks: + :type blocks: ~azure.storage.blob.models.BlockLookupList + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', + 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. 
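The blob_tags_string parameter described above travels as the x-ms-tags header. A sketch of the wire format this implies: URL-encoded key=value pairs joined with '&' (an assumption based on the query-string style of the header; the public SDK builds it from a dict, and serialize_blob_tags here is a hypothetical helper).

from urllib.parse import quote

def serialize_blob_tags(tags):
    # Hypothetical helper: render a dict in the query-string form the
    # x-ms-tags header appears to expect, percent-encoding each part.
    return "&".join(
        "{}={}".format(quote(k, safe=""), quote(v, safe=""))
        for k, v in tags.items()
    )

print(serialize_blob_tags({"project": "alpha", "status": "final"}))
# -> project=alpha&status=final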
+ :type blob_tags_string: str + :param blob_http_headers: Additional parameters for the operation + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + blob_cache_control = None + if blob_http_headers is not None: + blob_cache_control = blob_http_headers.blob_cache_control + blob_content_type = None + if blob_http_headers is not None: + blob_content_type = blob_http_headers.blob_content_type + blob_content_encoding = None + if blob_http_headers is not None: + blob_content_encoding = blob_http_headers.blob_content_encoding + blob_content_language = None + if blob_http_headers is not None: + blob_content_language = blob_http_headers.blob_content_language + blob_content_md5 = None + if blob_http_headers is not None: + blob_content_md5 = blob_http_headers.blob_content_md5 + blob_content_disposition = None + if blob_http_headers is not None: + blob_content_disposition = blob_http_headers.blob_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "blocklist" + + # Construct URL + url = self.commit_block_list.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; 
charset=utf-8' + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') + if blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') + if blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') + if blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') + if blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') + if blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + body_content = self._serialize.body(blocks, 'BlockLookupList') + + # Construct and send request + request = 
self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + commit_block_list.metadata = {'url': '/{containerName}/{blob}'} + + def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """The Get Block List operation retrieves the list of blocks that have + been uploaded as part of a block blob. + + :param list_type: Specifies whether to return the list of committed + blocks, the list of uncommitted blocks, or both lists together. + Possible values include: 'committed', 'uncommitted', 'all' + :type list_type: str or ~azure.storage.blob.models.BlockListType + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: BlockList or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlockList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "blocklist" + + # Construct URL + url = self.get_block_list.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('BlockList', response) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, 
deserialized, header_dict) + + return deserialized + get_block_list.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_container_operations.py new file mode 100644 index 0000000..5730483 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_container_operations.py @@ -0,0 +1,1400 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. import models + + +class ContainerOperations(object): + """ContainerOperations operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + + def create(self, timeout=None, metadata=None, access=None, request_id=None, container_cpk_scope_info=None, cls=None, **kwargs): + """Creates a new container under the specified account. If the container + with the same name already exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the container. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param access: Specifies whether data in the container may be accessed + publicly and the level of access. Possible values include: + 'container', 'blob' + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
+ :type request_id: str + :param container_cpk_scope_info: Additional parameters for the + operation + :type container_cpk_scope_info: + ~azure.storage.blob.models.ContainerCpkScopeInfo + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + default_encryption_scope = None + if container_cpk_scope_info is not None: + default_encryption_scope = container_cpk_scope_info.default_encryption_scope + prevent_encryption_scope_override = None + if container_cpk_scope_info is not None: + prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override + + restype = "container" + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if access is not None: + header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if default_encryption_scope is not None: + header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", default_encryption_scope, 'str') + if prevent_encryption_scope_override is not None: + header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", prevent_encryption_scope_override, 'bool') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{containerName}'} + + def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): + """returns all user-defined metadata and system properties for the + 
specified container. The data returned does not include the container's + list of blobs. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + restype = "container" + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), + 'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')), + 'x-ms-has-legal-hold': self._deserialize('bool', 
response.headers.get('x-ms-has-legal-hold')), + 'x-ms-default-encryption-scope': self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')), + 'x-ms-deny-encryption-scope-override': self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{containerName}'} + + def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Marks the specified container for deletion. The container and + any blobs contained within it are later deleted during garbage + collection. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + restype = "container" + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) +
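# Editor's note (illustrative, not part of the generated code): callers
+ # may pass an `error_map` through **kwargs to translate selected HTTP
+ # status codes into specific azure.core exceptions before the generic
+ # StorageErrorException below is raised, e.g. with a hypothetical `ops`
+ # instance of this class:
+ #
+ #     from azure.core.exceptions import ResourceNotFoundError
+ #     ops.delete(error_map={404: ResourceNotFoundError})
+ #
+ # map_error() then raises the mapped exception type whenever the
+ # response status code matches one of the keys.
+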
response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{containerName}'} + + def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Sets one or more user-defined name-value pairs for the + specified container. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the container. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + + restype = "container" + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{containerName}'} + + def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): + """gets the permissions for the specified container. The permissions + indicate whether container data may be accessed publicly. 
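+
+ Example (editor's illustrative sketch, not part of the generated code;
+ assumes `ops` is a wired-up ContainerOperations instance):
+
+ >>> acl = ops.get_access_policy()  # -> list of SignedIdentifier
+ >>> ops.set_access_policy(container_acl=acl, access='container')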
+ + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: list or the result of cls(response) + :rtype: list[~azure.storage.blob.models.SignedIdentifier] + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + restype = "container" + comp = "acl" + + # Construct URL + url = self.get_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[SignedIdentifier]', response) + header_dict = { + 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_access_policy.metadata = {'url': '/{containerName}'} + + def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, lease_access_conditions=None, 
modified_access_conditions=None, cls=None, **kwargs): + """sets the permissions for the specified container. The permissions + indicate whether blobs in a container may be accessed publicly. + + :param container_acl: the acls for the container + :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param access: Specifies whether data in the container may be accessed + publicly and the level of access. Possible values include: + 'container', 'blob' + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + restype = "container" + comp = "acl" + + # Construct URL + url = self.set_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + if access is not None: + header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifiers', 'wrapped': True}} + if 
container_acl is not None: + body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) + else: + body_content = None + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_access_policy.metadata = {'url': '/{containerName}'} + + def restore(self, timeout=None, request_id=None, deleted_container_name=None, deleted_container_version=None, cls=None, **kwargs): + """Restores a previously-deleted container. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param deleted_container_name: Optional. Version 2019-12-12 and + later. Specifies the name of the deleted container to restore. + :type deleted_container_name: str + :param deleted_container_version: Optional. Version 2019-12-12 and + later. Specifies the version of the deleted container to restore.
+ :type deleted_container_version: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "container" + comp = "undelete" + + # Construct URL + url = self.restore.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if deleted_container_name is not None: + header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') + if deleted_container_version is not None: + header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + restore.metadata = {'url': '/{containerName}'} + + def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations. The lock duration can be 15 to 60 seconds, or can be + infinite. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or + negative one (-1) for a lease that never expires. A non-infinite lease + can be between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
+ :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + comp = "lease" + restype = "container" + action = "acquire" + + # Construct URL + url = self.acquire_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + acquire_lease.metadata = {'url': '/{containerName}'} + + def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations. The lock duration can be 15 to 60 seconds, or can be + infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + comp = "lease" + restype = "container" + action = "release" + + # Construct URL + url = self.release_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + 
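# Editor's note (illustrative, not part of the generated code): `cls`
+ # is the hook for callers who need the typed response headers; it is
+ # invoked as cls(raw_response, deserialized_body, response_headers).
+ # For example, with a hypothetical `ops` instance and an active lease:
+ #
+ #     hdrs = ops.release_lease(lease_id, cls=lambda resp, body, h: h)
+ #     etag = hdrs['ETag']
+ #
+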
response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + release_lease.metadata = {'url': '/{containerName}'} + + def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations. The lock duration can be 15 to 60 seconds, or can be + infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + comp = "lease" + restype = "container" + action = "renew" + + # Construct URL + url = self.renew_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 
'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + renew_lease.metadata = {'url': '/{containerName}'} + + def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations. The lock duration can be 15 to 60 seconds, or can be + infinite. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param break_period: For a break operation, proposed duration the + lease should continue before it is broken, in seconds, between 0 and + 60. This break period is only used if it is shorter than the time + remaining on the lease. If longer, the time remaining on the lease is + used. A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break period. + If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, + and an infinite lease breaks immediately. + :type break_period: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + comp = "lease" + restype = "container" + action = "break" + + # Construct URL + url = self.break_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + break_lease.metadata = {'url': '/{containerName}'} + + def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, 
cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations. The lock duration can be 15 to 60 seconds, or can be + infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + comp = "lease" + restype = "container" + action = "change" + + # Construct URL + url = self.change_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise 
models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + change_lease.metadata = {'url': '/{containerName}'} + + def list_blob_flat_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): + """[Update] The List Blobs operation returns a list of the blobs under the + specified container. + + :param prefix: Filters the results to return only blobs whose + name begins with the specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list + of blobs to be returned with the next listing operation. The + operation returns the NextMarker value within the response body if the + listing operation did not return all blobs remaining to be listed + with the current page. The NextMarker value can be used as the value + for the marker parameter in a subsequent call to request the next page + of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of blobs to + return. If the request does not specify maxresults, or specifies a + value greater than 5000, the server will return up to 5000 items. Note + that if the listing operation crosses a partition boundary, then the + service will return a continuation token for retrieving the remainder + of the results. For this reason, it is possible that the service will + return fewer results than specified by maxresults, or than the default + of 5000. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets + to include in the response. + :type include: list[str or + ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListBlobsFlatSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "container" + comp = "list" + + # Construct URL + url = self.list_blob_flat_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response) + header_dict = { + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_blob_flat_segment.metadata = {'url': '/{containerName}'} + + def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): + """[Update] The List Blobs operation returns a list of the blobs under the + specified container. 
+ + :param delimiter: When the request includes this parameter, the + operation returns a BlobPrefix element in the response body that acts + as a placeholder for all blobs whose names begin with the same + substring up to the appearance of the delimiter character. The + delimiter may be a single character or a string. + :type delimiter: str + :param prefix: Filters the results to return only blobs whose + name begins with the specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list + of blobs to be returned with the next listing operation. The + operation returns the NextMarker value within the response body if the + listing operation did not return all blobs remaining to be listed + with the current page. The NextMarker value can be used as the value + for the marker parameter in a subsequent call to request the next page + of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of blobs to + return. If the request does not specify maxresults, or specifies a + value greater than 5000, the server will return up to 5000 items. Note + that if the listing operation crosses a partition boundary, then the + service will return a continuation token for retrieving the remainder + of the results. For this reason, it is possible that the service will + return fewer results than specified by maxresults, or than the default + of 5000. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets + to include in the response. + :type include: list[str or + ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListBlobsHierarchySegmentResponse or the result of + cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "container" + comp = "list" + + # Construct URL + url = self.list_blob_hierarchy_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response) + header_dict = { + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'}
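Both listing operations above page through results with the marker/NextMarker pair described in their docstrings. A minimal pagination sketch under stated assumptions, not part of the generated code: `container_ops` is a hypothetical, already-configured instance of this operations class (normally created for you by the generated client), and the deserialized response exposes `segment.blob_items` and `next_marker` as in the generated models.

    # Drain a flat listing page by page using the opaque continuation marker.
    marker = None
    while True:
        page = container_ops.list_blob_flat_segment(prefix="logs/", marker=marker, maxresults=1000)
        for blob in page.segment.blob_items:  # blob entries deserialized from the XML body
            print(blob.name)
        marker = page.next_marker  # empty once the listing is complete
        if not marker:
            break

The same loop shape works for list_blob_hierarchy_segment; its segment additionally carries blob_prefixes entries for names truncated at the delimiter.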
+ + def get_account_info(self, cls=None, **kwargs): + """Returns the sku name and account kind. + + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "account" + comp = "properties" + + # Construct URL + url = self.get_account_info.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), + 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_account_info.metadata = {'url': '/{containerName}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_directory_operations.py new file mode 100644 index 0000000..c2bf317 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_directory_operations.py @@ -0,0 +1,739 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. import models + + +class DirectoryOperations(object): + """DirectoryOperations operations. + + You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar resource: Constant value: "directory".
+ """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.resource = "directory" + + def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Create a directory. By default, the destination is overwritten and if + the destination already exists and has a lease the lease is broken. + This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + To fail if the destination already exists, use a conditional request + with If-None-Match: "*". + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param directory_properties: Optional. User-defined properties to be + stored with the file or directory, in the format of a comma-separated + list of name and value pairs "n1=v1, n2=v2, ...", where each value is + base64 encoded. + :type directory_properties: str + :param posix_permissions: Optional and only valid if Hierarchical + Namespace is enabled for the account. Sets POSIX access permissions + for the file owner, the file owning group, and others. Each class may + be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + 0766) are supported. + :type posix_permissions: str + :param posix_umask: Only valid if Hierarchical Namespace is enabled + for the account. This umask restricts permission settings for file and + directory, and will only be applied when default Acl does not exist in + parent directory. If the umask bit has set, it means that the + corresponding permission will be disabled. Otherwise the corresponding + permission will be determined by the permission. A 4-digit octal + notation (e.g. 0022) is supported here. If no umask was specified, a + default umask - 0027 will be used. + :type posix_umask: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param directory_http_headers: Additional parameters for the operation + :type directory_http_headers: + ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + cache_control = None + if directory_http_headers is not None: + cache_control = directory_http_headers.cache_control + content_type = None + if directory_http_headers is not None: + content_type = directory_http_headers.content_type + content_encoding = None + if directory_http_headers is not None: + content_encoding = directory_http_headers.content_encoding + content_language = None + if directory_http_headers is not None: + content_language = directory_http_headers.content_language + content_disposition = None + if directory_http_headers is not None: + content_disposition = directory_http_headers.content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') + + # Construct headers + header_parameters = {} + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_encoding is not None: 
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{filesystem}/{path}'}
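The create docstring above calls out the conditional-request pattern: send If-None-Match: "*" so the call fails when the destination already exists rather than overwriting it. A minimal sketch under stated assumptions, not part of the generated code: `directory_ops` is a hypothetical, pre-configured instance of this DirectoryOperations class, and the 412 mapping relies on the azure.core map_error/error_map hook this module already uses.

    from azure.core.exceptions import ResourceExistsError
    from azure.multiapi.storagev2.blob.v2020_02_10._generated import models

    # Create-if-absent: '*' matches any existing etag, so an existing path fails the precondition.
    conditions = models.ModifiedAccessConditions(if_none_match='*')
    directory_ops.create(
        posix_permissions="rwxr-x---",          # symbolic form; 4-digit octal such as "0750" also accepted
        posix_umask="0027",
        modified_access_conditions=conditions,
        error_map={412: ResourceExistsError},   # assumed mapping: Precondition Failed -> already exists
    )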
+ + def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): + """Rename a directory. By default, the destination is overwritten and, if + the destination already exists and has a lease, the lease is broken. + This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + To fail if the destination already exists, use a conditional request + with If-None-Match: "*". + + :param rename_source: The file or directory to be renamed. The value + must have the following format: "/{filesystem}/{path}". If + "x-ms-properties" is specified, the properties will overwrite the + existing properties; otherwise, the existing properties will be + preserved. + :type rename_source: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param marker: When renaming a directory, the number of paths that are + renamed with each invocation is limited. If the number of paths to be + renamed exceeds this limit, a continuation token is returned in this + response header. When a continuation token is returned in the + response, it must be specified in a subsequent invocation of the + rename operation to continue renaming the directory. + :type marker: str + :param path_rename_mode: Determines the behavior of the rename + operation. Possible values include: 'legacy', 'posix' + :type path_rename_mode: str or + ~azure.storage.blob.models.PathRenameMode + :param directory_properties: Optional. User-defined properties to be + stored with the file or directory, in the format of a comma-separated + list of name and value pairs "n1=v1, n2=v2, ...", where each value is + base64 encoded. + :type directory_properties: str + :param posix_permissions: Optional and only valid if Hierarchical + Namespace is enabled for the account. Sets POSIX access permissions + for the file owner, the file owning group, and others. Each class may + be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + 0766) are supported. + :type posix_permissions: str + :param posix_umask: Only valid if Hierarchical Namespace is enabled + for the account. This umask restricts permission settings for the file + and directory, and will only be applied when a default ACL does not + exist in the parent directory. If a umask bit is set, the + corresponding permission is disabled; otherwise it is determined by + the permission setting. A 4-digit octal notation (e.g. 0022) is + supported here. If no umask is specified, a default umask of 0027 will + be used. + :type posix_umask: str + :param source_lease_id: A lease ID for the source path. If specified, + the source path must have an active lease and the lease ID must match. + :type source_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
+ :type request_id: str + :param directory_http_headers: Additional parameters for the operation + :type directory_http_headers: + ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + cache_control = None + if directory_http_headers is not None: + cache_control = directory_http_headers.cache_control + content_type = None + if directory_http_headers is not None: + content_type = directory_http_headers.content_type + content_encoding = None + if directory_http_headers is not None: + content_encoding = directory_http_headers.content_encoding + content_language = None + if directory_http_headers is not None: + content_language = directory_http_headers.content_language + content_disposition = None + if directory_http_headers is not None: + content_disposition = directory_http_headers.content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + + # Construct URL + url = self.rename.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') + + # Construct 
headers + header_parameters = {} + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 
'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + } + return cls(response, None, response_headers) + rename.metadata = {'url': '/{filesystem}/{path}'} + + def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Deletes the directory. + + :param recursive_directory_delete: If "true", all paths beneath the + directory will be deleted. If "false" and the directory is non-empty, + an error occurs. + :type recursive_directory_delete: bool + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param marker: When deleting a directory, the number of paths that are + deleted with each invocation is limited. If the number of paths to be + deleted exceeds this limit, a continuation token is returned in this + response header. When a continuation token is returned in the + response, it must be specified in a subsequent invocation of the + delete operation to continue deleting the directory. + :type marker: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{filesystem}/{path}'} + + def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Set the owner, group, permissions, or access control list for a + directory. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical + Namespace is enabled for the account. Sets POSIX access permissions + for the file owner, the file owning group, and others. Each class may + be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. + 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and + directories. The value is a comma-separated list of access control + entries. Each access control entry (ACE) consists of a scope, a type, + a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "setAccessControl" + + # Construct URL + url = self.set_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + } + return cls(response, None, response_headers) + set_access_control.metadata = {'url': '/{filesystem}/{path}'} + + def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Get the owner, group, permissions, or access control list for a + directory. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the identity values returned in + the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`DataLakeStorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "getAccessControl" + + # Construct URL + url = self.get_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + 
query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.DataLakeStorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), + 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), + 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), + 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + } + return cls(response, None, response_headers) + get_access_control.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_page_blob_operations.py new file mode 100644 index 0000000..fedc96c --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_page_blob_operations.py @@ -0,0 +1,1399 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. import models + + +class PageBlobOperations(object): + """PageBlobOperations operations. 
+ + You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.x_ms_blob_type = "PageBlob" + + def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + """The Create operation creates a new page blob. + + :param content_length: The length of the request. + :type content_length: long + :param blob_content_length: This header specifies the maximum size for + the page blob, up to 1 TB. The page blob size must be aligned to a + 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param tier: Optional. Indicates the tier to be set on the page blob. + Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', + 'P40', 'P50', 'P60', 'P70', 'P80' + :type tier: str or + ~azure.storage.blob.models.PremiumPageBlobAccessTier + :param metadata: Optional. Specifies a user-defined name-value pair + associated with the blob. If no name-value pairs are specified, the + operation will copy the metadata from the source blob or file to the + destination blob. If one or more name-value pairs are specified, the + destination blob is created with the specified metadata, and metadata + is not copied from the source blob or file. Note that beginning with + version 2009-09-19, metadata names must adhere to the naming rules for + C# identifiers. See Naming and Referencing Containers, Blobs, and + Metadata for more information. + :type metadata: str + :param blob_sequence_number: Set for page blobs only. The sequence + number is a user-controlled value that you can use to track requests. + The value of the sequence number must be between 0 and 2^63 - 1. + :type blob_sequence_number: long + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations.
+ :type blob_tags_string: str + :param blob_http_headers: Additional parameters for the operation + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + blob_content_type = None + if blob_http_headers is not None: + blob_content_type = blob_http_headers.blob_content_type + blob_content_encoding = None + if blob_http_headers is not None: + blob_content_encoding = blob_http_headers.blob_content_encoding + blob_content_language = None + if blob_http_headers is not None: + blob_content_language = blob_http_headers.blob_content_language + blob_content_md5 = None + if blob_http_headers is not None: + blob_content_md5 = blob_http_headers.blob_content_md5 + blob_cache_control = None + if blob_http_headers is not None: + blob_cache_control = blob_http_headers.blob_cache_control + blob_content_disposition = None + if blob_http_headers is not None: + blob_content_disposition = blob_http_headers.blob_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if tier is not None: + header_parameters['x-ms-access-tier'] 
= self._serialize.header("tier", tier, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') + if blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') + if blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') + if blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') + if blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') + if blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') + if blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{containerName}/{blob}'} + + def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """The Upload Pages operation writes a range of pages to a page blob. + + :param body: The page data to upload. + :type body: Generator + :param content_length: The length of the request. + :type content_length: long + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param range: The range of bytes to write as pages. Both the start + and end of the range must be aligned to 512-byte boundaries. + :type range: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
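+ Example: a minimal sketch of exercising this operation through the
+ public BlobClient wrapper (this assumes the package exposes the
+ v12-style client surface; conn_str and the names are illustrative)::
+
+     from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient
+
+     conn_str = "<storage connection string>"  # illustrative placeholder
+     blob = BlobClient.from_connection_string(conn_str, "mycontainer", "mypageblob")
+     blob.create_page_blob(size=1024)  # page blob sizes are 512-byte aligned
+     blob.upload_page(b"\x00" * 512, offset=0, length=512)  # drives upload_pages
+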
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Additional parameters for + the operation + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_sequence_number_less_than_or_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + if_sequence_number_less_than = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + if_sequence_number_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "page" + page_write = "update" + + # Construct URL + url = self.upload_pages.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if 
transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') + if if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') + if if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': 
self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_pages.metadata = {'url': '/{containerName}/{blob}'} + + def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """The Clear Pages operation clears a set of pages from a page blob. + + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param range: The range of bytes to clear. Both the start and end of + the range must be aligned to 512-byte boundaries. + :type range: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
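+ Example: a sketch of clearing an aligned range through the public
+ BlobClient wrapper (assuming the v12-style surface; names are
+ illustrative)::
+
+     from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient
+
+     conn_str = "<storage connection string>"  # illustrative placeholder
+     blob = BlobClient.from_connection_string(conn_str, "mycontainer", "mypageblob")
+     blob.clear_page(offset=0, length=512)  # issues x-ms-page-write: clear
+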
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Additional parameters for + the operation + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_sequence_number_less_than_or_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + if_sequence_number_less_than = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + if_sequence_number_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "page" + page_write = "clear" + + # Construct URL + url = self.clear_pages.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if range is not None: + header_parameters['x-ms-range'] = 
self._serialize.header("range", range, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') + if if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') + if if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + clear_pages.metadata = {'url': '/{containerName}/{blob}'} + + def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): + """The Upload Pages operation writes a range of pages to a page blob where + the contents are read from a URL. + + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. The + length of this range should match the ContentLength header and + x-ms-range/Range destination range header. + :type source_range: str + :param content_length: The length of the request. + :type content_length: long + :param range: The range of bytes to which the source range would be + written. The range should be 512 aligned and range-end is required. + :type range: str + :param source_content_md5: Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range + of bytes that must be read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
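+ Example: a sketch of a server-side page write from a source URL
+ (assuming the v12-style BlobClient surface; the SAS URL is an
+ illustrative placeholder and must grant the service read access)::
+
+     from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient
+
+     conn_str = "<storage connection string>"  # illustrative placeholder
+     source_sas_url = "<source page blob URL with SAS>"  # illustrative placeholder
+     dest = BlobClient.from_connection_string(conn_str, "mycontainer", "destpageblob")
+     # Copy 512 bytes from source offset 0 into destination offset 0.
+     dest.upload_pages_from_url(source_sas_url, offset=0, length=512, source_offset=0)
+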
+ :type request_id: str + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param sequence_number_access_conditions: Additional parameters for + the operation + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_sequence_number_less_than_or_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + if_sequence_number_less_than = None + if sequence_number_access_conditions is not None: + if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + if_sequence_number_equal_to = None + if sequence_number_access_conditions is not None: + if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is 
not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + + comp = "page" + page_write = "update" + + # Construct URL + url = self.upload_pages_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') + if if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') + if if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if 
if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} + + def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """The Get Page Ranges operation returns the list of valid page ranges for + a page blob or snapshot of a page blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param range: Return only the bytes of the blob in the specified + range. 
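+ Example: a sketch of listing valid page ranges through the public
+ BlobClient wrapper (assuming the v12-style surface, which returns a
+ tuple of page ranges and clear ranges; names are illustrative)::
+
+     from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient
+
+     conn_str = "<storage connection string>"  # illustrative placeholder
+     blob = BlobClient.from_connection_string(conn_str, "mycontainer", "mypageblob")
+     ranges, cleared = blob.get_page_ranges()
+     for r in ranges:  # each entry is a dict with 'start' and 'end' offsets
+         print(r['start'], r['end'])
+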
+ :type range: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: PageList or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "pagelist" + + # Construct URL + url = self.get_page_ranges.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PageList', response) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} + + def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, prev_snapshot_url=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """The Get Page Ranges Diff operation returns the list of valid page + ranges for a page blob that were changed between the target blob and a + previous snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param prevsnapshot: Optional in version 2015-07-08 and newer. The + prevsnapshot parameter is a DateTime value that specifies that the + response will contain only pages that were changed between the target + blob and the previous snapshot. Changed pages include both updated and + cleared pages. The target blob may be a snapshot, as long as the + snapshot specified by prevsnapshot is the older of the two. Note that + incremental snapshots are currently supported only for blobs created + on or after January 1, 2016. + :type prevsnapshot: str + :param prev_snapshot_url: Optional. This header is only supported in + service versions 2019-04-19 and after and specifies the URL of a + previous snapshot of the target blob. The response will only contain + pages that were changed between the target blob and its previous + snapshot. + :type prev_snapshot_url: str + :param range: Return only the bytes of the blob in the specified + range. + :type range: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled.
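+ Example: a sketch of diffing against an earlier snapshot (assuming the
+ v12-style BlobClient surface, whose previous_snapshot_diff argument
+ routes to this operation; names are illustrative)::
+
+     from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient
+
+     conn_str = "<storage connection string>"  # illustrative placeholder
+     blob = BlobClient.from_connection_string(conn_str, "mycontainer", "mypageblob")
+     snap = blob.create_snapshot()
+     blob.upload_page(b"\xff" * 512, offset=0, length=512)
+     changed, cleared = blob.get_page_ranges(previous_snapshot_diff=snap)
+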
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: PageList or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "pagelist" + + # Construct URL + url = self.get_page_ranges_diff.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if prevsnapshot is not None: + query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + if prev_snapshot_url is not None: + header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and 
send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PageList', response) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} + + def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + """Resize the Blob. + + :param blob_content_length: This header specifies the maximum size for + the page blob, up to 1 TB. The page blob size must be aligned to a + 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
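+ Example: a sketch via the public BlobClient wrapper (assuming the
+ v12-style surface; names are illustrative)::
+
+     from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient
+
+     conn_str = "<storage connection string>"  # illustrative placeholder
+     blob = BlobClient.from_connection_string(conn_str, "mycontainer", "mypageblob")
+     blob.resize_blob(2048)  # the new size must be a multiple of 512 bytes
+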
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Additional parameters for the operation + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + encryption_scope = None + if cpk_scope_info is not None: + encryption_scope = cpk_scope_info.encryption_scope + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "properties" + + # Construct URL + url = self.resize.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') 
+ if encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + resize.metadata = {'url': '/{containerName}/{blob}'} + + def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Update the sequence number of the blob. + + :param sequence_number_action: Required if the + x-ms-blob-sequence-number header is set for the request. This property + applies to page blobs only. This property indicates how the service + should modify the blob's sequence number. Possible values include: + 'max', 'update', 'increment' + :type sequence_number_action: str or + ~azure.storage.blob.models.SequenceNumberActionType + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param blob_sequence_number: Set for page blobs only. The sequence + number is a user-controlled value that you can use to track requests. + The value of the sequence number must be between 0 and 2^63 - 1. + :type blob_sequence_number: long + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
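+ Example: a sketch via the public BlobClient wrapper (assuming the
+ v12-style surface; the lowercase action strings mirror the 'max',
+ 'update' and 'increment' values listed above, and names are
+ illustrative)::
+
+     from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient
+
+     conn_str = "<storage connection string>"  # illustrative placeholder
+     blob = BlobClient.from_connection_string(conn_str, "mycontainer", "mypageblob")
+     blob.set_sequence_number("update", 7)  # set an explicit sequence number
+     blob.set_sequence_number("increment")  # or bump the current value by one
+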
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "properties" + + # Construct URL + url = self.update_sequence_number.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    update_sequence_number.metadata = {'url': '/{containerName}/{blob}'}
+
+    def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+        """The Copy Incremental operation copies a snapshot of the source page
+        blob to a destination page blob. The snapshot is copied such that only
+        the differential changes between the previously copied snapshot and
+        the current snapshot are transferred to the destination. The copied
+        snapshots are complete copies of the original snapshot and can be read
+        or copied from as usual. This API is supported since REST version
+        2016-05-31.
+
+        :param copy_source: Specifies the name of the source page blob
+         snapshot. This value is a URL of up to 2 KB in length that specifies a
+         page blob snapshot. The value should be URL-encoded as it would appear
+         in a request URI. The source blob must either be public or must be
+         authenticated via a shared access signature.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + comp = "incrementalcopy" + + # Construct URL + url = self.copy_incremental.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 
'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+                'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    copy_incremental.metadata = {'url': '/{containerName}/{blob}'}
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_service_operations.py
new file mode 100644
index 0000000..0a49915
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_service_operations.py
@@ -0,0 +1,663 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class ServiceOperations(object):
+    """ServiceOperations operations.
+
+    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+
+    def set_properties(self, storage_service_properties, timeout=None, request_id=None, cls=None, **kwargs):
+        """Sets properties for a storage account's Blob service endpoint,
+        including properties for Storage Analytics and CORS (Cross-Origin
+        Resource Sharing) rules.
+
+        :param storage_service_properties: The StorageService properties.
+        :type storage_service_properties:
+         ~azure.storage.blob.models.StorageServiceProperties
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        restype = "service"
+        comp = "properties"
+
+        # Construct URL
+        url = self.set_properties.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+        # Construct body
+        body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters, body_content)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_properties.metadata = {'url': '/'}
+
+    def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs):
+        """Gets the properties of a storage account's Blob service, including
+        properties for Storage Analytics and CORS (Cross-Origin Resource
+        Sharing) rules.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: StorageServiceProperties or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceProperties + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "service" + comp = "properties" + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('StorageServiceProperties', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_properties.metadata = {'url': '/'} + + def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs): + """Retrieves statistics related to replication for the Blob service. It is + only available on the secondary location endpoint when read-access + geo-redundant replication is enabled for the storage account. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: StorageServiceStats or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceStats + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "service" + comp = "stats" + + # Construct URL + url = self.get_statistics.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('StorageServiceStats', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_statistics.metadata = {'url': '/'} + + def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): + """The List Containers Segment operation returns a list of the containers + under the specified account. + + :param prefix: Filters the results to return only containers whose + name begins with the specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list + of containers to be returned with the next listing operation. The + operation returns the NextMarker value within the response body if the + listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value + for the marker parameter in a subsequent call to request the next page + of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to + return. If the request does not specify maxresults, or specifies a + value greater than 5000, the server will return up to 5000 items. 
Note + that if the listing operation crosses a partition boundary, then the + service will return a continuation token for retrieving the remainder + of the results. For this reason, it is possible that the service will + return fewer results than specified by maxresults, or than the default + of 5000. + :type maxresults: int + :param include: Include this parameter to specify that the container's + metadata be returned as part of the response body. + :type include: list[str or + ~azure.storage.blob.models.ListContainersIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListContainersSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "list" + + # Construct URL + url = self.list_containers_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[ListContainersIncludeType]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListContainersSegmentResponse', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return 
cls(response, deserialized, header_dict)
+
+        return deserialized
+    list_containers_segment.metadata = {'url': '/'}
+
+    def get_user_delegation_key(self, key_info, timeout=None, request_id=None, cls=None, **kwargs):
+        """Retrieves a user delegation key for the Blob service. This is only
+        a valid operation when using bearer token authentication.
+
+        :param key_info:
+        :type key_info: ~azure.storage.blob.models.KeyInfo
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: UserDelegationKey or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.UserDelegationKey
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        restype = "service"
+        comp = "userdelegationkey"
+
+        # Construct URL
+        url = self.get_user_delegation_key.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/xml'
+        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+        # Construct body
+        body_content = self._serialize.body(key_info, 'KeyInfo')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('UserDelegationKey', response)
+            header_dict = {
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    get_user_delegation_key.metadata = {'url': '/'}
+
+    def get_account_info(self, cls=None, **kwargs):
+        """Returns the SKU name and account kind.
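+
+        For example (a minimal sketch; ``blob_service_client`` is assumed to
+        be an authenticated ``BlobServiceClient``, whose public
+        ``get_account_information()`` method surfaces these response headers)::
+
+            info = blob_service_client.get_account_information()
+            sku_name, account_kind = info['sku_name'], info['account_kind']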
+ + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "account" + comp = "properties" + + # Construct URL + url = self.get_account_info.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), + 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_account_info.metadata = {'url': '/'} + + def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, cls=None, **kwargs): + """The Batch operation allows multiple API calls to be embedded into a + single HTTP request. + + :param body: Initial data + :type body: Generator + :param content_length: The length of the request. + :type content_length: long + :param multipart_content_type: Required. The value of this header must + be multipart/mixed with a batch boundary. Example header value: + multipart/mixed; boundary=batch_ + :type multipart_content_type: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
:type request_id: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: object or the result of cls(response)
+        :rtype: Generator
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "batch"
+
+        # Construct URL
+        url = self.submit_batch.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/xml'
+        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+        header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, stream_content=body)
+        pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = response.stream_download(self._client._pipeline)
+            header_dict = {
+                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    submit_batch.metadata = {'url': '/'}
+
+    def filter_blobs(self, timeout=None, request_id=None, where=None, marker=None, maxresults=None, cls=None, **kwargs):
+        """The Filter Blobs operation enables callers to list blobs across all
+        containers whose tags match a given search expression. Filter blobs
+        searches across all containers within a storage account but can be
+        scoped within the expression to a single container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param where: Filters the results to return only blobs whose tags
+         match the specified expression.
+        :type where: str
+        :param marker: A string value that identifies the portion of the list
+         of blobs to be returned with the next listing operation. The
+         operation returns the NextMarker value within the response body if the
+         listing operation did not return all blobs remaining to be listed
+         with the current page. The NextMarker value can be used as the value
+         for the marker parameter in a subsequent call to request the next page
+         of list items. The marker value is opaque to the client.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to
+         return. If the request does not specify maxresults, or specifies a
+         value greater than 5000, the server will return up to 5000 items. Note
+         that if the listing operation crosses a partition boundary, then the
+         service will return a continuation token for retrieving the remainder
+         of the results. For this reason, it is possible that the service will
+         return fewer results than specified by maxresults, or than the default
+         of 5000.
+        :type maxresults: int
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: FilterBlobSegment or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.FilterBlobSegment
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "blobs"
+
+        # Construct URL
+        url = self.filter_blobs.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if where is not None:
+            query_parameters['where'] = self._serialize.query("where", where, 'str')
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if maxresults is not None:
+            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/xml'
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('FilterBlobSegment', response)
+            header_dict = {
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    filter_blobs.metadata = {'url': '/'}
diff --git
a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/version.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/version.py new file mode 100644 index 0000000..6ef707d --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +VERSION = "2020-02-10" + diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_lease.py b/azure/multiapi/storagev2/blob/v2020_02_10/_lease.py new file mode 100644 index 0000000..1fd668c --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_lease.py @@ -0,0 +1,331 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import uuid + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, TypeVar, TYPE_CHECKING +) + +from azure.core.tracing.decorator import distributed_trace + +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._generated.models import StorageErrorException +from ._serialize import get_modify_conditions + +if TYPE_CHECKING: + from datetime import datetime + + BlobClient = TypeVar("BlobClient") + ContainerClient = TypeVar("ContainerClient") + + +class BlobLeaseClient(object): + """Creates a new BlobLeaseClient. + + This client provides lease operations on a BlobClient or ContainerClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the blob or container to lease. + :type client: ~azure.storage.blob.BlobClient or + ~azure.storage.blob.ContainerClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. 
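+
+    .. admonition:: Example
+
+        A minimal usage sketch (``blob_client`` is a placeholder for an
+        existing ``BlobClient``)::
+
+            lease = BlobLeaseClient(blob_client)
+            lease.acquire(lease_duration=15)
+            try:
+                pass  # perform operations while holding the lease
+            finally:
+                lease.release()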
+ """ + def __init__( + self, client, lease_id=None + ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs + # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None + self.id = lease_id or str(uuid.uuid4()) + self.last_modified = None + self.etag = None + if hasattr(client, 'blob_name'): + self._client = client._client.blob # type: ignore # pylint: disable=protected-access + elif hasattr(client, 'container_name'): + self._client = client._client.container # type: ignore # pylint: disable=protected-access + else: + raise TypeError("Lease must use either BlobClient or ContainerClient.") + + def __enter__(self): + return self + + def __exit__(self, *args): + self.release() + + @distributed_trace + def acquire(self, lease_duration=-1, **kwargs): + # type: (int, **Any) -> None + """Requests a new lease. + + If the container does not have an active lease, the Blob service creates a + lease on the container and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace + def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the container or blob. Note that + the lease may be renewed even if it has expired as long as the container + or blob has not been leased again since the expiration of that lease. 
When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the container or blob. Releasing the lease allows another client + to immediately acquire the lease for the container or blob as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. 
versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.change_lease( + lease_id=self.id, + proposed_lease_id=proposed_lease_id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def break_lease(self, lease_break_period=None, **kwargs): + # type: (Optional[int], Any) -> int + """Break the lease, if the container or blob has an active lease. + + Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the container or blob. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. 
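+
+        For example (a sketch; ``lease`` is assumed to be a ``BlobLeaseClient``
+        holding an active lease)::
+
+            seconds_remaining = lease.break_lease(lease_break_period=10)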
+
+        :param int lease_break_period:
+            This is the proposed duration, in seconds, that the lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the lease. If longer, the time remaining on the lease is used.
+            A new lease will not be available before the break period has
+            expired, but the lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. "\"tagname\"='my tag'"
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response = self._client.break_lease(
+                timeout=kwargs.pop('timeout', None),
+                break_period=lease_break_period,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        return response.get('lease_time') # type: ignore
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_02_10/_list_blobs_helper.py
new file mode 100644
index 0000000..f1dd70f
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_list_blobs_helper.py
@@ -0,0 +1,166 @@
+# pylint: disable=too-many-lines
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from azure.core.paging import PageIterator, ItemPaged
+from ._deserialize import get_blob_properties_from_generated_code
+from ._generated.models import StorageErrorException, BlobItemInternal, BlobPrefix as GenBlobPrefix
+from ._models import BlobProperties
+from ._shared.models import DictMixin
+from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
+
+
+class BlobPropertiesPaged(PageIterator):
+    """An Iterable of Blob properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A blob name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(BlobPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobProperties): + return item + if isinstance(item, BlobItemInternal): + blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access + blob.container = self.container + return blob + return item + + +class BlobPrefixPaged(BlobPropertiesPaged): + def __init__(self, *args, **kwargs): + super(BlobPrefixPaged, self).__init__(*args, **kwargs) + self.name = self.prefix + + def _extract_data_cb(self, get_next_return): + continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return continuation_token, self.current_page + + def 
_build_item(self, item): + item = super(BlobPrefixPaged, self)._build_item(item) + if isinstance(item, GenBlobPrefix): + return BlobPrefix( + self._command, + container=self.container, + prefix=item.name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item + + +class BlobPrefix(ItemPaged, DictMixin): + """An Iterable of Blob properties. + + Returned from walk_blobs when a delimiter is used. + Can be thought of as a virtual blob directory. + + :ivar str name: The prefix, or "directory name" of the blob. + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str next_marker: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str marker: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__(self, *args, **kwargs): + super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) + self.name = kwargs.get('prefix') + self.prefix = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.container = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_models.py b/azure/multiapi/storagev2/blob/v2020_02_10/_models.py new file mode 100644 index 0000000..a7658cc --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_models.py @@ -0,0 +1,1173 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from enum import Enum + +from azure.core.paging import PageIterator +from ._generated.models import FilterBlobItem, ArrowField + +from ._shared import decode_base64_to_text +from ._shared.response_handlers import return_context_and_deserialized, process_storage_error +from ._shared.models import DictMixin, get_enum_value +from ._generated.models import Logging as GeneratedLogging +from ._generated.models import Metrics as GeneratedMetrics +from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy +from ._generated.models import StaticWebsite as GeneratedStaticWebsite +from ._generated.models import CorsRule as GeneratedCorsRule +from ._generated.models import AccessPolicy as GenAccessPolicy +from ._generated.models import StorageErrorException + + +class BlobType(str, Enum): + + BlockBlob = "BlockBlob" + PageBlob = "PageBlob" + AppendBlob = "AppendBlob" + + +class BlockState(str, Enum): + """Block blob block types.""" + + Committed = 'Committed' #: Committed blocks. + Latest = 'Latest' #: Latest blocks. + Uncommitted = 'Uncommitted' #: Uncommitted blocks. + + +class StandardBlobTier(str, Enum): + """ + Specifies the blob tier to set the blob to. This is only applicable for + block blobs on standard storage accounts. + """ + + Archive = 'Archive' #: Archive + Cool = 'Cool' #: Cool + Hot = 'Hot' #: Hot + + +class PremiumPageBlobTier(str, Enum): + """ + Specifies the page blob tier to set the blob to. This is only applicable to page + blobs on premium storage accounts. Please take a look at: + https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets + for detailed information on the corresponding IOPS and throughput per PageBlobTier. + """ + + P4 = 'P4' #: P4 Tier + P6 = 'P6' #: P6 Tier + P10 = 'P10' #: P10 Tier + P20 = 'P20' #: P20 Tier + P30 = 'P30' #: P30 Tier + P40 = 'P40' #: P40 Tier + P50 = 'P50' #: P50 Tier + P60 = 'P60' #: P60 Tier + + +class SequenceNumberAction(str, Enum): + """Sequence number actions.""" + + Increment = 'increment' + """ + Increments the value of the sequence number by 1. If specifying this option, + do not include the x-ms-blob-sequence-number header. + """ + + Max = 'max' + """ + Sets the sequence number to be the higher of the value included with the + request and the value currently stored for the blob. + """ + + Update = 'update' + """Sets the sequence number to the value included with the request.""" + + +class PublicAccess(str, Enum): + """ + Specifies whether data in the container may be accessed publicly and the level of access. + """ + + OFF = 'off' + """ + Specifies that there is no public read access for both the container and blobs within the container. + Clients cannot enumerate the containers within the storage account as well as the blobs within the container. + """ + + Blob = 'blob' + """ + Specifies public read access for blobs. Blob data within this container can be read + via anonymous request, but container data is not available. Clients cannot enumerate + blobs within the container via anonymous request. + """ + + Container = 'container' + """ + Specifies full public read access for container and blob data. Clients can enumerate + blobs within the container via anonymous request, but cannot enumerate containers + within the storage account. 
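+
+    For example, a hedged sketch of applying this access level when creating a
+    container (the ``blob_service_client`` name is illustrative, not defined here):
+
+    .. code-block:: python
+
+        # Anonymous clients will be able to read and enumerate blobs in this container.
+        container_client = blob_service_client.create_container(
+            'mycontainer', public_access=PublicAccess.Container)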
+ """ + + +class BlobAnalyticsLogging(GeneratedLogging): + """Azure Analytics Logging settings. + + :keyword str version: + The version of Storage Analytics to configure. The default value is 1.0. + :keyword bool delete: + Indicates whether all delete requests should be logged. The default value is `False`. + :keyword bool read: + Indicates whether all read requests should be logged. The default value is `False`. + :keyword bool write: + Indicates whether all write requests should be logged. The default value is `False`. + :keyword ~azure.storage.blob.RetentionPolicy retention_policy: + Determines how long the associated data should persist. If not specified the retention + policy will be disabled by default. + """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.delete = kwargs.get('delete', False) + self.read = kwargs.get('read', False) + self.write = kwargs.get('write', False) + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + delete=generated.delete, + read=generated.read, + write=generated.write, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class Metrics(GeneratedMetrics): + """A summary of request statistics grouped by API in hour or minute aggregates + for blobs. + + :keyword str version: + The version of Storage Analytics to configure. The default value is 1.0. + :keyword bool enabled: + Indicates whether metrics are enabled for the Blob service. + The default value is `False`. + :keyword bool include_apis: + Indicates whether metrics should generate summary statistics for called API operations. + :keyword ~azure.storage.blob.RetentionPolicy retention_policy: + Determines how long the associated data should persist. If not specified the retention + policy will be disabled by default. + """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.enabled = kwargs.get('enabled', False) + self.include_apis = kwargs.get('include_apis') + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + enabled=generated.enabled, + include_apis=generated.include_apis, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class RetentionPolicy(GeneratedRetentionPolicy): + """The retention policy which determines how long the associated data should + persist. + + :param bool enabled: + Indicates whether a retention policy is enabled for the storage service. + The default value is False. + :param int days: + Indicates the number of days that metrics or logging or + soft-deleted data should be retained. All data older than this value will + be deleted. If enabled=True, the number of days must be specified. 
+ """ + + def __init__(self, enabled=False, days=None): + self.enabled = enabled + self.days = days + if self.enabled and (self.days is None): + raise ValueError("If policy is enabled, 'days' must be specified.") + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + days=generated.days, + ) + + +class StaticWebsite(GeneratedStaticWebsite): + """The properties that enable an account to host a static website. + + :keyword bool enabled: + Indicates whether this account is hosting a static website. + The default value is `False`. + :keyword str index_document: + The default name of the index page under each directory. + :keyword str error_document404_path: + The absolute path of the custom 404 page. + :keyword str default_index_document_path: + Absolute path of the default index page. + """ + + def __init__(self, **kwargs): + self.enabled = kwargs.get('enabled', False) + if self.enabled: + self.index_document = kwargs.get('index_document') + self.error_document404_path = kwargs.get('error_document404_path') + self.default_index_document_path = kwargs.get('default_index_document_path') + else: + self.index_document = None + self.error_document404_path = None + self.default_index_document_path = None + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + index_document=generated.index_document, + error_document404_path=generated.error_document404_path, + default_index_document_path=generated.default_index_document_path + ) + + +class CorsRule(GeneratedCorsRule): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. Web browsers implement a + security restriction known as same-origin policy that prevents a web page + from calling APIs in a different domain; CORS provides a secure way to + allow one domain (the origin domain) to call APIs in another domain. + + :param list(str) allowed_origins: + A list of origin domains that will be allowed via CORS, or "*" to allow + all domains. The list of must contain at least one entry. Limited to 64 + origin domains. Each allowed origin can have up to 256 characters. + :param list(str) allowed_methods: + A list of HTTP methods that are allowed to be executed by the origin. + The list of must contain at least one entry. For Azure Storage, + permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. + :keyword list(str) allowed_headers: + Defaults to an empty list. A list of headers allowed to be part of + the cross-origin request. Limited to 64 defined headers and 2 prefixed + headers. Each header can be up to 256 characters. + :keyword list(str) exposed_headers: + Defaults to an empty list. A list of response headers to expose to CORS + clients. Limited to 64 defined headers and two prefixed headers. Each + header can be up to 256 characters. + :keyword int max_age_in_seconds: + The number of seconds that the client/browser should cache a + preflight response. 
+ """ + + def __init__(self, allowed_origins, allowed_methods, **kwargs): + self.allowed_origins = ','.join(allowed_origins) + self.allowed_methods = ','.join(allowed_methods) + self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) + self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) + self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) + + @classmethod + def _from_generated(cls, generated): + return cls( + [generated.allowed_origins], + [generated.allowed_methods], + allowed_headers=[generated.allowed_headers], + exposed_headers=[generated.exposed_headers], + max_age_in_seconds=generated.max_age_in_seconds, + ) + + +class ContainerProperties(DictMixin): + """Blob container's properties class. + + Returned ``ContainerProperties`` instances expose these values through a + dictionary interface, for example: ``container_props["last_modified"]``. + Additionally, the container name is available as ``container_props["name"]``. + + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the container was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar ~azure.storage.blob.LeaseProperties lease: + Stores all the lease information for the container. + :ivar str public_access: Specifies whether data in the container may be accessed + publicly and the level of access. + :ivar bool has_immutability_policy: + Represents whether the container has an immutability policy. + :ivar bool has_legal_hold: + Represents whether the container has a legal hold. + :ivar dict metadata: A dict with name-value pairs to associate with the + container as metadata. + :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope: + The default encryption scope configuration for the container. + """ + + def __init__(self, **kwargs): + self.name = None + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.lease = LeaseProperties(**kwargs) + self.public_access = kwargs.get('x-ms-blob-public-access') + self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy') + self.deleted = None + self.version = None + self.has_legal_hold = kwargs.get('x-ms-has-legal-hold') + self.metadata = kwargs.get('metadata') + self.encryption_scope = None + default_encryption_scope = kwargs.get('x-ms-default-encryption-scope') + if default_encryption_scope: + self.encryption_scope = ContainerEncryptionScope( + default_encryption_scope=default_encryption_scope, + prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False) + ) + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.last_modified = generated.properties.last_modified + props.etag = generated.properties.etag + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + props.public_access = generated.properties.public_access + props.has_immutability_policy = generated.properties.has_immutability_policy + props.deleted = generated.deleted + props.version = generated.version + props.has_legal_hold = generated.properties.has_legal_hold + props.metadata = generated.metadata + props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access + return props + + +class ContainerPropertiesPaged(PageIterator): + """An Iterable of Container properties. + + :ivar str service_endpoint: The service URL. 
+    :ivar str prefix: A container name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.blob.ContainerProperties)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only containers whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of container names to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
+        super(ContainerPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [self._build_item(item) for item in self._response.container_items]
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        return ContainerProperties._from_generated(item)  # pylint: disable=protected-access
+
+
+class BlobProperties(DictMixin):
+    """
+    Blob Properties.
+
+    :ivar str name:
+        The name of the blob.
+    :ivar str container:
+        The container in which the blob resides.
+    :ivar str snapshot:
+        Datetime value that uniquely identifies the blob snapshot.
+    :ivar ~azure.storage.blob.BlobType blob_type:
+        String indicating this blob's type.
+    :ivar dict metadata:
+        Name-value pairs associated with the blob as metadata.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the blob was modified.
+    :ivar str etag:
+        The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar int size:
+        The size of the content returned. If the entire blob was requested,
+        the length of the blob in bytes. If a subset of the blob was requested, the
+        length of the returned subset.
+    :ivar str content_range:
+        Indicates the range of bytes returned in the event that the client
+        requested a subset of the blob.
+    :ivar int append_blob_committed_block_count:
+        (For Append Blobs) Number of committed blocks in the blob.
+    :ivar bool is_append_blob_sealed:
+        Indicates whether the append blob is sealed.
+
+        ..
versionadded:: 12.4.0 + + :ivar int page_blob_sequence_number: + (For Page Blobs) Sequence number for page blob used for coordinating + concurrent writes. + :ivar bool server_encrypted: + Set to true if the blob is encrypted on the server. + :ivar ~azure.storage.blob.CopyProperties copy: + Stores all the copy properties for the blob. + :ivar ~azure.storage.blob.ContentSettings content_settings: + Stores all the content settings for the blob. + :ivar ~azure.storage.blob.LeaseProperties lease: + Stores all the lease information for the blob. + :ivar ~azure.storage.blob.StandardBlobTier blob_tier: + Indicates the access tier of the blob. The hot tier is optimized + for storing data that is accessed frequently. The cool storage tier + is optimized for storing data that is infrequently accessed and stored + for at least a month. The archive tier is optimized for storing + data that is rarely accessed and stored for at least six months + with flexible latency requirements. + :ivar str rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :ivar ~datetime.datetime blob_tier_change_time: + Indicates when the access tier was last changed. + :ivar bool blob_tier_inferred: + Indicates whether the access tier was inferred by the service. + If false, it indicates that the tier was set explicitly. + :ivar bool deleted: + Whether this blob was deleted. + :ivar ~datetime.datetime deleted_time: + A datetime object representing the time at which the blob was deleted. + :ivar int remaining_retention_days: + The number of days that the blob will be retained before being permanently deleted by the service. + :ivar ~datetime.datetime creation_time: + Indicates when the blob was created, in UTC. + :ivar str archive_status: + Archive status of blob. + :ivar str encryption_key_sha256: + The SHA-256 hash of the provided encryption key. + :ivar str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + :ivar bool request_server_encrypted: + Whether this blob is encrypted. + :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties: + Only present for blobs that have policy ids and rule ids applied to them. + + .. versionadded:: 12.4.0 + + :ivar str object_replication_destination_policy: + Represents the Object Replication Policy Id that created this blob. + + .. versionadded:: 12.4.0 + + :ivar ~datetime.datetime last_accessed_on: + Indicates when the last Read/Write operation was performed on a Blob. + + .. versionadded:: 12.6.0 + + :ivar int tag_count: + Tags count on this blob. + + .. versionadded:: 12.4.0 + + :ivar dict(str, str) tags: + Key value pair of tags on this blob. + + .. 
versionadded:: 12.4.0 + + """ + + def __init__(self, **kwargs): + self.name = kwargs.get('name') + self.container = None + self.snapshot = kwargs.get('x-ms-snapshot') + self.version_id = kwargs.get('x-ms-version-id') + self.is_current_version = kwargs.get('x-ms-is-current-version') + self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None + self.metadata = kwargs.get('metadata') + self.encrypted_metadata = kwargs.get('encrypted_metadata') + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.size = kwargs.get('Content-Length') + self.content_range = kwargs.get('Content-Range') + self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') + self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') + self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') + self.server_encrypted = kwargs.get('x-ms-server-encrypted') + self.copy = CopyProperties(**kwargs) + self.content_settings = ContentSettings(**kwargs) + self.lease = LeaseProperties(**kwargs) + self.blob_tier = kwargs.get('x-ms-access-tier') + self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') + self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') + self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') + self.deleted = False + self.deleted_time = None + self.remaining_retention_days = None + self.creation_time = kwargs.get('x-ms-creation-time') + self.archive_status = kwargs.get('x-ms-archive-status') + self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') + self.encryption_scope = kwargs.get('x-ms-encryption-scope') + self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') + self.object_replication_source_properties = kwargs.get('object_replication_source_properties') + self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id') + self.last_accessed_on = kwargs.get('x-ms-last-access-time') + self.tag_count = kwargs.get('x-ms-tag-count') + self.tags = None + + +class FilteredBlob(DictMixin): + """Blob info from a Filter Blobs API call. + + :ivar name: Blob name + :type name: str + :ivar container_name: Container name. + :type container_name: str + """ + def __init__(self, **kwargs): + self.name = kwargs.get('name', None) + self.container_name = kwargs.get('container_name', None) + + +class FilteredBlobPaged(PageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.FilteredBlob) + :ivar str container: The container that the blobs are listed from. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param location_mode: Specifies the location the request should be sent to. 
+ This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__( + self, command, + container=None, + results_per_page=None, + continuation_token=None, + location_mode=None): + super(FilteredBlobPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.marker = continuation_token + self.results_per_page = results_per_page + self.container = container + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.marker = self._response.next_marker + self.current_page = [self._build_item(item) for item in self._response.blobs] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, FilterBlobItem): + blob = FilteredBlob(name=item.name, container_name=item.container_name) # pylint: disable=protected-access + return blob + return item + + +class LeaseProperties(DictMixin): + """Blob Lease Properties. + + :ivar str status: + The lease status of the blob. Possible values: locked|unlocked + :ivar str state: + Lease state of the blob. Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a blob is leased, specifies whether the lease is of infinite or fixed duration. + """ + + def __init__(self, **kwargs): + self.status = get_enum_value(kwargs.get('x-ms-lease-status')) + self.state = get_enum_value(kwargs.get('x-ms-lease-state')) + self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) + + @classmethod + def _from_generated(cls, generated): + lease = cls() + lease.status = get_enum_value(generated.properties.lease_status) + lease.state = get_enum_value(generated.properties.lease_state) + lease.duration = get_enum_value(generated.properties.lease_duration) + return lease + + +class ContentSettings(DictMixin): + """The content settings of a blob. + + :param str content_type: + The content type specified for the blob. If no content type was + specified, the default content type is application/octet-stream. + :param str content_encoding: + If the content_encoding has previously been set + for the blob, that value is stored. + :param str content_language: + If the content_language has previously been set + for the blob, that value is stored. + :param str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the blob, that value is stored. + :param str cache_control: + If the cache_control has previously been set for + the blob, that value is stored. + :param str content_md5: + If the content_md5 has been set for the blob, this response + header is stored so that the client can check for message content + integrity. 
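+
+    For example, a hedged sketch of attaching settings at upload time (the
+    ``blob_client`` name and payload are illustrative):
+
+    .. code-block:: python
+
+        settings = ContentSettings(content_type='text/html', cache_control='max-age=3600')
+        blob_client.upload_blob(b'<html></html>', overwrite=True, content_settings=settings)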
+ """ + + def __init__( + self, content_type=None, content_encoding=None, + content_language=None, content_disposition=None, + cache_control=None, content_md5=None, **kwargs): + + self.content_type = content_type or kwargs.get('Content-Type') + self.content_encoding = content_encoding or kwargs.get('Content-Encoding') + self.content_language = content_language or kwargs.get('Content-Language') + self.content_md5 = content_md5 or kwargs.get('Content-MD5') + self.content_disposition = content_disposition or kwargs.get('Content-Disposition') + self.cache_control = cache_control or kwargs.get('Cache-Control') + + @classmethod + def _from_generated(cls, generated): + settings = cls() + settings.content_type = generated.properties.content_type or None + settings.content_encoding = generated.properties.content_encoding or None + settings.content_language = generated.properties.content_language or None + settings.content_md5 = generated.properties.content_md5 or None + settings.content_disposition = generated.properties.content_disposition or None + settings.cache_control = generated.properties.cache_control or None + return settings + + +class CopyProperties(DictMixin): + """Blob Copy Properties. + + These properties will be `None` if this blob has never been the destination + in a Copy Blob operation, or if this blob has been modified after a concluded + Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. + + :ivar str id: + String identifier for the last attempted Copy Blob operation where this blob + was the destination blob. + :ivar str source: + URL up to 2 KB in length that specifies the source blob used in the last attempted + Copy Blob operation where this blob was the destination blob. + :ivar str status: + State of the copy operation identified by Copy ID, with these values: + success: + Copy completed successfully. + pending: + Copy is in progress. Check copy_status_description if intermittent, + non-fatal errors impede copy progress but don't cause failure. + aborted: + Copy was ended by Abort Copy Blob. + failed: + Copy failed. See copy_status_description for failure details. + :ivar str progress: + Contains the number of bytes copied and the total bytes in the source in the last + attempted Copy Blob operation where this blob was the destination blob. Can show + between 0 and Content-Length bytes copied. + :ivar ~datetime.datetime completion_time: + Conclusion time of the last attempted Copy Blob operation where this blob was the + destination blob. This value can specify the time of a completed, aborted, or + failed copy attempt. + :ivar str status_description: + Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal + or non-fatal copy operation failure. + :ivar bool incremental_copy: + Copies the snapshot of the source page blob to a destination page blob. + The snapshot is copied such that only the differential changes between + the previously copied snapshot are transferred to the destination + :ivar ~datetime.datetime destination_snapshot: + Included if the blob is incremental copy blob or incremental copy snapshot, + if x-ms-copy-status is success. Snapshot time of the last successful + incremental copy snapshot for this blob. 
+ """ + + def __init__(self, **kwargs): + self.id = kwargs.get('x-ms-copy-id') + self.source = kwargs.get('x-ms-copy-source') + self.status = get_enum_value(kwargs.get('x-ms-copy-status')) + self.progress = kwargs.get('x-ms-copy-progress') + self.completion_time = kwargs.get('x-ms-copy-completion_time') + self.status_description = kwargs.get('x-ms-copy-status-description') + self.incremental_copy = kwargs.get('x-ms-incremental-copy') + self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') + + @classmethod + def _from_generated(cls, generated): + copy = cls() + copy.id = generated.properties.copy_id or None + copy.status = get_enum_value(generated.properties.copy_status) or None + copy.source = generated.properties.copy_source or None + copy.progress = generated.properties.copy_progress or None + copy.completion_time = generated.properties.copy_completion_time or None + copy.status_description = generated.properties.copy_status_description or None + copy.incremental_copy = generated.properties.incremental_copy or None + copy.destination_snapshot = generated.properties.destination_snapshot or None + return copy + + +class BlobBlock(DictMixin): + """BlockBlob Block class. + + :param str block_id: + Block id. + :param str state: + Block state. Possible values: committed|uncommitted + :ivar int size: + Block size in bytes. + """ + + def __init__(self, block_id, state=BlockState.Latest): + self.id = block_id + self.state = state + self.size = None + + @classmethod + def _from_generated(cls, generated): + block = cls(decode_base64_to_text(generated.name)) + block.size = generated.size + return block + + +class PageRange(DictMixin): + """Page Range for page blob. + + :param int start: + Start of page range in bytes. + :param int end: + End of page range in bytes. + """ + + def __init__(self, start=None, end=None): + self.start = start + self.end = end + + +class AccessPolicy(GenAccessPolicy): + """Access Policy class used by the set and get access policy methods in each service. + + A stored access policy can specify the start time, expiry time, and + permissions for the Shared Access Signatures with which it's associated. + Depending on how you want to control access to your resource, you can + specify all of these parameters within the stored access policy, and omit + them from the URL for the Shared Access Signature. Doing so permits you to + modify the associated signature's behavior at any time, as well as to revoke + it. Or you can specify one or more of the access policy parameters within + the stored access policy, and the others on the URL. Finally, you can + specify all of the parameters on the URL. In this case, you can use the + stored access policy to revoke the signature, but not to modify its behavior. + + Together the Shared Access Signature and the stored access policy must + include all fields required to authenticate the signature. If any required + fields are missing, the request will fail. Likewise, if a field is specified + both in the Shared Access Signature URL and in the stored access policy, the + request will fail with status code 400 (Bad Request). + + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. 
+ :type permission: str or ~azure.storage.blob.ContainerSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + """ + def __init__(self, permission=None, expiry=None, start=None): + self.start = start + self.expiry = expiry + self.permission = permission + + +class ContainerSasPermissions(object): + """ContainerSasPermissions class to be used with the + :func:`~azure.storage.blob.generate_container_sas` function and + for the AccessPolicies used with + :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. + + :param bool read: + Read the content, properties, metadata or block list of any blob in the + container. Use any blob in the container as the source of a copy operation. + :param bool write: + For any blob in the container, create or write content, properties, + metadata, or block list. Snapshot or lease the blob. Resize the blob + (page blob only). Use the blob as the destination of a copy operation + within the same account. Note: You cannot grant permissions to read or + write container properties or metadata, nor to lease a container, with + a container SAS. Use an account SAS instead. + :param bool delete: + Delete any blob in the container. Note: You cannot grant permissions to + delete a container with a container SAS. Use an account SAS instead. + :param bool delete_previous_version: + Delete the previous blob version for the versioning enabled storage account. + :param bool list: + List blobs in the container. + :param bool tag: + Set or get tags on the blobs in the container. + """ + def __init__(self, read=False, write=False, delete=False, list=False, delete_previous_version=False, tag=False): # pylint: disable=redefined-builtin + self.read = read + self.write = write + self.delete = delete + self.list = list + self.delete_previous_version = delete_previous_version + self.tag = tag + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('l' if self.list else '') + + ('t' if self.tag else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create a ContainerSasPermissions from a string. + + To specify read, write, delete, or list permissions you need only to + include the first letter of the word in the string. E.g. For read and + write permissions, you would provide a string "rw". + + :param str permission: The string which dictates the read, write, delete, + and list permissions. 
+ :return: A ContainerSasPermissions object + :rtype: ~azure.storage.blob.ContainerSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_list = 'l' in permission + p_delete_previous_version = 'x' in permission + p_tag = 't' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, + delete_previous_version=p_delete_previous_version, tag=p_tag) + + return parsed + + +class BlobSasPermissions(object): + """BlobSasPermissions class to be used with the + :func:`~azure.storage.blob.generate_blob_sas` function. + + :param bool read: + Read the content, properties, metadata and block list. Use the blob as + the source of a copy operation. + :param bool add: + Add a block to an append blob. + :param bool create: + Write a new blob, snapshot a blob, or copy a blob to a new blob. + :param bool write: + Create or write content, properties, metadata, or block list. Snapshot + or lease the blob. Resize the blob (page blob only). Use the blob as the + destination of a copy operation within the same account. + :param bool delete: + Delete the blob. + :param bool delete_previous_version: + Delete the previous blob version for the versioning enabled storage account. + :param bool tag: + Set or get tags on the blob. + """ + def __init__(self, read=False, add=False, create=False, write=False, + delete=False, delete_previous_version=False, tag=True): + self.read = read + self.add = add + self.create = create + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.tag = tag + self._str = (('r' if self.read else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('t' if self.tag else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create a BlobSasPermissions from a string. + + To specify read, add, create, write, or delete permissions you need only to + include the first letter of the word in the string. E.g. For read and + write permissions, you would provide a string "rw". + + :param str permission: The string which dictates the read, add, create, + write, or delete permissions. + :return: A BlobSasPermissions object + :rtype: ~azure.storage.blob.BlobSasPermissions + """ + p_read = 'r' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_tag = 't' in permission + + parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete, + delete_previous_version=p_delete_previous_version, tag=p_tag) + + return parsed + + +class CustomerProvidedEncryptionKey(object): + """ + All data in Azure Storage is encrypted at-rest using an account-level encryption key. + In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents + and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service. + + When you use a customer-provided key, Azure Storage does not manage or persist your key. + When writing data to a blob, the provided key is used to encrypt your data before writing it to disk. + A SHA-256 hash of the encryption key is written alongside the blob contents, + and is used to verify that all subsequent operations against the blob use the same encryption key. 
+ This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob. + When reading a blob, the provided key is used to decrypt your data after reading it from disk. + In both cases, the provided encryption key is securely discarded + as soon as the encryption or decryption process completes. + + :param str key_value: + Base64-encoded AES-256 encryption key value. + :param str key_hash: + Base64-encoded SHA256 of the encryption key. + :ivar str algorithm: + Specifies the algorithm to use when encrypting data using the given key. Must be AES256. + """ + def __init__(self, key_value, key_hash): + self.key_value = key_value + self.key_hash = key_hash + self.algorithm = 'AES256' + + +class ContainerEncryptionScope(object): + """The default encryption scope configuration for a container. + + This scope is used implicitly for all future writes within the container, + but can be overridden per blob operation. + + .. versionadded:: 12.2.0 + + :param str default_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + :param bool prevent_encryption_scope_override: + If true, prevents any request from specifying a different encryption scope than the scope + set on the container. Default value is false. + """ + + def __init__(self, default_encryption_scope, **kwargs): + self.default_encryption_scope = default_encryption_scope + self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False) + + @classmethod + def _from_generated(cls, generated): + if generated.properties.default_encryption_scope: + scope = cls( + generated.properties.default_encryption_scope, + prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False + ) + return scope + return None + + +class DelimitedJsonDialect(object): + """Defines the input or output JSON serialization for a blob data query. + + :keyword str delimiter: The line separator character, default value is '\n' + """ + + def __init__(self, **kwargs): + self.delimiter = kwargs.pop('delimiter', '\n') + + +class DelimitedTextDialect(object): + """Defines the input or output delimited (CSV) serialization for a blob query request. + + :keyword str delimiter: + Column separator, defaults to ','. + :keyword str quotechar: + Field quote, defaults to '"'. + :keyword str lineterminator: + Record separator, defaults to '\n'. + :keyword str escapechar: + Escape char, defaults to empty. + :keyword bool has_header: + Whether the blob data includes headers in the first line. The default value is False, meaning that the + data will be returned inclusive of the first line. If set to True, the data will be returned exclusive + of the first line. + """ + def __init__(self, **kwargs): + self.delimiter = kwargs.pop('delimiter', ',') + self.quotechar = kwargs.pop('quotechar', '"') + self.lineterminator = kwargs.pop('lineterminator', '\n') + self.escapechar = kwargs.pop('escapechar', "") + self.has_header = kwargs.pop('has_header', False) + + +class ArrowDialect(ArrowField): + """field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :param ~azure.storage.blob.ArrowType type: Arrow field type. + :keyword str name: The name of the field. + :keyword int precision: The precision of the field. + :keyword int scale: The scale of the field. 
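+
+    A minimal sketch of an Arrow output schema for a blob query (field values
+    and the ``blob_client`` name are illustrative assumptions):
+
+    .. code-block:: python
+
+        schema = [ArrowDialect(ArrowType.DECIMAL, name='Price', precision=4, scale=2)]
+        reader = blob_client.query_blob("SELECT * from BlobStorage", output_format=schema)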
+ """ + def __init__(self, type, **kwargs): # pylint: disable=redefined-builtin + super(ArrowDialect, self).__init__(type=type, **kwargs) + + +class ArrowType(str, Enum): + + INT64 = "int64" + BOOL = "bool" + TIMESTAMP_MS = "timestamp[ms]" + STRING = "string" + DOUBLE = "double" + DECIMAL = 'decimal' + + +class ObjectReplicationPolicy(DictMixin): + """Policy id and rule ids applied to a blob. + + :ivar str policy_id: + Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair. + :ivar list(~azure.storage.blob.ObjectReplicationRule) rules: + Within each policy there may be multiple replication rules. + e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3 + """ + + def __init__(self, **kwargs): + self.policy_id = kwargs.pop('policy_id', None) + self.rules = kwargs.pop('rules', None) + + +class ObjectReplicationRule(DictMixin): + """Policy id and rule ids applied to a blob. + + :ivar str rule_id: + Rule id. + :ivar str status: + The status of the rule. It could be "Complete" or "Failed" + """ + + def __init__(self, **kwargs): + self.rule_id = kwargs.pop('rule_id', None) + self.status = kwargs.pop('status', None) + + +class BlobQueryError(object): + """The error happened during quick query operation. + + :ivar str error: + The name of the error. + :ivar bool is_fatal: + If true, this error prevents further query processing. More result data may be returned, + but there is no guarantee that all of the original data will be processed. + If false, this error does not prevent further query processing. + :ivar str description: + A description of the error. + :ivar int position: + The blob offset at which the error occurred. + """ + def __init__(self, error=None, is_fatal=False, description=None, position=None): + self.error = error + self.is_fatal = is_fatal + self.description = description + self.position = position diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_quick_query_helper.py b/azure/multiapi/storagev2/blob/v2020_02_10/_quick_query_helper.py new file mode 100644 index 0000000..eb51d98 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_quick_query_helper.py @@ -0,0 +1,196 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from io import BytesIO +from typing import Union, Iterable, IO # pylint: disable=unused-import + +from ._shared.avro.datafile import DataFileReader +from ._shared.avro.avro_io import DatumReader + + +class BlobQueryReader(object): # pylint: disable=too-many-instance-attributes + """A streaming object to read query results. + + :ivar str name: + The name of the blob being quered. + :ivar str container: + The name of the container where the blob is. + :ivar dict response_headers: + The response_headers of the quick query request. + :ivar bytes record_delimiter: + The delimiter used to separate lines, or records with the data. The `records` + method will return these lines via a generator. 
+ """ + + def __init__( + self, + name=None, + container=None, + errors=None, + record_delimiter='\n', + encoding=None, + headers=None, + response=None, + error_cls=None, + ): + self.name = name + self.container = container + self.response_headers = headers + self.record_delimiter = record_delimiter + self._size = 0 + self._bytes_processed = 0 + self._errors = errors + self._encoding = encoding + self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader()) + self._first_result = self._process_record(next(self._parsed_results)) + self._error_cls = error_cls + + def __len__(self): + return self._size + + def _process_record(self, result): + self._size = result.get('totalBytes', self._size) + self._bytes_processed = result.get('bytesScanned', self._bytes_processed) + if 'data' in result: + return result.get('data') + if 'fatal' in result: + error = self._error_cls( + error=result['name'], + is_fatal=result['fatal'], + description=result['description'], + position=result['position'] + ) + if self._errors: + self._errors(error) + return None + + def _iter_stream(self): + if self._first_result is not None: + yield self._first_result + for next_result in self._parsed_results: + processed_result = self._process_record(next_result) + if processed_result is not None: + yield processed_result + + def readall(self): + # type: () -> Union[bytes, str] + """Return all query results. + + This operation is blocking until all data is downloaded. + If encoding has been configured - this will be used to decode individual + records are they are received. + + :rtype: Union[bytes, str] + """ + stream = BytesIO() + self.readinto(stream) + data = stream.getvalue() + if self._encoding: + return data.decode(self._encoding) + return data + + def readinto(self, stream): + # type: (IO) -> None + """Download the query result to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. + :returns: None + """ + for record in self._iter_stream(): + stream.write(record) + + def records(self): + # type: () -> Iterable[Union[bytes, str]] + """Returns a record generator for the query result. + + Records will be returned line by line. + If encoding has been configured - this will be used to decode individual + records are they are received. + + :rtype: Iterable[Union[bytes, str]] + """ + delimiter = self.record_delimiter.encode('utf-8') + for record_chunk in self._iter_stream(): + for record in record_chunk.split(delimiter): + if self._encoding: + yield record.decode(self._encoding) + else: + yield record + + + +class QuickQueryStreamer(object): + """ + File-like streaming iterator. + """ + + def __init__(self, generator): + self.generator = generator + self.iterator = iter(generator) + self._buf = b"" + self._point = 0 + self._download_offset = 0 + self._buf_start = 0 + self.file_length = None + + def __len__(self): + return self.file_length + + def __iter__(self): + return self.iterator + + @staticmethod + def seekable(): + return True + + def __next__(self): + next_part = next(self.iterator) + self._download_offset += len(next_part) + return next_part + + next = __next__ # Python 2 compatibility. + + def tell(self): + return self._point + + def seek(self, offset, whence=0): + if whence == 0: + self._point = offset + elif whence == 1: + self._point += offset + else: + raise ValueError("whence must be 0, or 1") + if self._point < 0: + self._point = 0 # XXX is this right? 
+    def read(self, size):
+        try:
+            # keep reading from the generator until the buffer of this stream has enough data to read
+            while self._point + size > self._download_offset:
+                self._buf += self.__next__()
+        except StopIteration:
+            self.file_length = self._download_offset
+
+        start_point = self._point
+
+        # EOF
+        self._point = min(self._point + size, self._download_offset)
+
+        relative_start = start_point - self._buf_start
+        if relative_start < 0:
+            raise ValueError("Buffer has dumped too much data")
+        relative_end = relative_start + size
+        data = self._buf[relative_start: relative_end]
+
+        # dump the extra data in buffer
+        # buffer start--------------------16bytes----current read position
+        dumped_size = max(relative_end - 16 - relative_start, 0)
+        self._buf_start += dumped_size
+        self._buf = self._buf[dumped_size:]
+
+        return data
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_serialize.py b/azure/multiapi/storagev2/blob/v2020_02_10/_serialize.py
new file mode 100644
index 0000000..a4b13da
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_serialize.py
@@ -0,0 +1,196 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+try:
+    from urllib.parse import quote
+except ImportError:
+    from urllib2 import quote  # type: ignore
+
+from azure.core import MatchConditions
+
+from ._models import (
+    ContainerEncryptionScope,
+    DelimitedJsonDialect)
+from ._generated.models import (
+    ModifiedAccessConditions,
+    SourceModifiedAccessConditions,
+    CpkScopeInfo,
+    ContainerCpkScopeInfo,
+    QueryFormat,
+    QuerySerialization,
+    DelimitedTextConfiguration,
+    JsonTextConfiguration,
+    ArrowConfiguration,
+    QueryFormatType,
+    BlobTag,
+    BlobTags, LeaseAccessConditions
+)
+
+
+_SUPPORTED_API_VERSIONS = [
+    '2019-02-02',
+    '2019-07-07',
+    '2019-10-10',
+    '2019-12-12',
+    '2020-02-10',
+]
+
+
+def _get_match_headers(kwargs, match_param, etag_param):
+    # type: (Dict[str, Any], str, str) -> Tuple[Optional[str], Optional[str]]
+    if_match = None
+    if_none_match = None
+    match_condition = kwargs.pop(match_param, None)
+    if match_condition == MatchConditions.IfNotModified:
+        if_match = kwargs.pop(etag_param, None)
+        if not if_match:
+            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+    elif match_condition == MatchConditions.IfPresent:
+        if_match = '*'
+    elif match_condition == MatchConditions.IfModified:
+        if_none_match = kwargs.pop(etag_param, None)
+        if not if_none_match:
+            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+    elif match_condition == MatchConditions.IfMissing:
+        if_none_match = '*'
+    elif match_condition is None:
+        if kwargs.get(etag_param):
+            raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param))
+    else:
+        raise TypeError("Invalid match condition: {}".format(match_condition))
+    return if_match, if_none_match
+
+
+def get_access_conditions(lease):
+    # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
+    try:
+        lease_id = lease.id  # type: ignore
+    except AttributeError:
+        lease_id = lease  # type: ignore
+    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+
+def get_modify_conditions(kwargs):
+    # type: (Dict[str, Any]) -> ModifiedAccessConditions
if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') + return ModifiedAccessConditions( + if_modified_since=kwargs.pop('if_modified_since', None), + if_unmodified_since=kwargs.pop('if_unmodified_since', None), + if_match=if_match or kwargs.pop('if_match', None), + if_none_match=if_none_match or kwargs.pop('if_none_match', None), + if_tags=kwargs.pop('if_tags_match_condition', None) + ) + + +def get_source_conditions(kwargs): + # type: (Dict[str, Any]) -> SourceModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') + return SourceModifiedAccessConditions( + source_if_modified_since=kwargs.pop('source_if_modified_since', None), + source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), + source_if_match=if_match or kwargs.pop('source_if_match', None), + source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), + source_if_tags=kwargs.pop('source_if_tags_match_condition', None) + ) + + +def get_cpk_scope_info(kwargs): + # type: (Dict[str, Any]) -> CpkScopeInfo + if 'encryption_scope' in kwargs: + return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope')) + return None + + +def get_container_cpk_scope_info(kwargs): + # type: (Dict[str, Any]) -> ContainerCpkScopeInfo + encryption_scope = kwargs.pop('container_encryption_scope', None) + if encryption_scope: + if isinstance(encryption_scope, ContainerEncryptionScope): + return ContainerCpkScopeInfo( + default_encryption_scope=encryption_scope.default_encryption_scope, + prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override + ) + if isinstance(encryption_scope, dict): + return ContainerCpkScopeInfo( + default_encryption_scope=encryption_scope['default_encryption_scope'], + prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override') + ) + raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.") + return None + + +def get_api_version(kwargs, default): + # type: (Dict[str, Any]) -> str + api_version = kwargs.pop('api_version', None) + if api_version and api_version not in _SUPPORTED_API_VERSIONS: + versions = '\n'.join(_SUPPORTED_API_VERSIONS) + raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) + return api_version or default + + +def serialize_blob_tags_header(tags=None): + # type: (Optional[Dict[str, str]]) -> str + if tags is None: + return None + + components = list() + if tags: + for key, value in tags.items(): + components.append(quote(key, safe='.-')) + components.append('=') + components.append(quote(value, safe='.-')) + components.append('&') + + if components: + del components[-1] + + return ''.join(components) + + +def serialize_blob_tags(tags=None): + # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None] + tag_list = list() + if tags: + tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()] + return BlobTags(blob_tag_set=tag_list) + + +def serialize_query_format(formater): + if isinstance(formater, DelimitedJsonDialect): + serialization_settings = JsonTextConfiguration( + record_separator=formater.delimiter + ) + qq_format = QueryFormat( + type=QueryFormatType.json, + json_text_configuration=serialization_settings) + elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well + try: + headers = formater.has_header + except AttributeError: + headers = False + serialization_settings = DelimitedTextConfiguration( + column_separator=formater.delimiter, + field_quote=formater.quotechar, + record_separator=formater.lineterminator, + escape_char=formater.escapechar, + headers_present=headers + ) + qq_format = QueryFormat( + type=QueryFormatType.delimited, + delimited_text_configuration=serialization_settings + ) + elif isinstance(formater, list): + serialization_settings = ArrowConfiguration( + schema=formater + ) + qq_format = QueryFormat( + type=QueryFormatType.arrow, + arrow_configuration=serialization_settings) + elif not formater: + return None + else: + raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect.") + return QuerySerialization(format=qq_format) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/__init__.py new file mode 100644 index 0000000..160f882 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/__init__.py @@ -0,0 +1,56 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import hmac + +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore + +import six + + +def url_quote(url): + return quote(url) + + +def url_unquote(url): + return unquote(url) + + +def encode_base64(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def decode_base64_to_bytes(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def decode_base64_to_text(data): + decoded_bytes = decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def sign_string(key, string_to_sign, key_is_base64=True): + if key_is_base64: + key = decode_base64_to_bytes(key) + else: + if isinstance(key, six.text_type): + key = key.encode('utf-8') + if isinstance(string_to_sign, six.text_type): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = encode_base64(digest) + return encoded_digest diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/authentication.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/authentication.py new file mode 100644 index 0000000..d04c1e4 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/authentication.py @@ -0,0 +1,142 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +import sys + +try: + from urllib.parse import urlparse, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import unquote # type: ignore + +try: + from yarl import URL +except ImportError: + pass + +try: + from azure.core.pipeline.transport import AioHttpTransport +except ImportError: + AioHttpTransport = None + +from azure.core.exceptions import ClientAuthenticationError +from azure.core.pipeline.policies import SansIOHTTPPolicy + +from . import sign_string + + +logger = logging.getLogger(__name__) + + + +# wraps a given exception with the desired exception type +def _wrap_exception(ex, desired_type): + msg = "" + if ex.args: + msg = ex.args[0] + if sys.version_info >= (3,): + # Automatic chaining in Python 3 means we keep the trace + return desired_type(msg) + # There isn't a good solution in 2 for keeping the stack trace + # in general, or that will not result in an error in 3 + # However, we can keep the previous error type and message + # TODO: In the future we will log the trace + return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) + + +class AzureSigningError(ClientAuthenticationError): + """ + Represents a fatal error when attempting to sign a request. + In general, the cause of this exception is user error. For example, the given account key is not valid. + Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
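+    For example, an account key that is not valid base64 will surface here
+    when sign_string attempts to decode it.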
+ """ + + +# pylint: disable=no-self-use +class SharedKeyCredentialPolicy(SansIOHTTPPolicy): + + def __init__(self, account_name, account_key): + self.account_name = account_name + self.account_key = account_key + super(SharedKeyCredentialPolicy, self).__init__() + + @staticmethod + def _get_headers(request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + @staticmethod + def _get_verb(request): + return request.http_request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = urlparse(request.http_request.url).path + try: + if isinstance(request.context.transport, AioHttpTransport) or \ + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ + isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), + AioHttpTransport): + uri_path = URL(uri_path) + return '/' + self.account_name + str(uri_path) + except TypeError: + pass + return '/' + self.account_name + uri_path + + @staticmethod + def _get_canonicalized_headers(request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.http_request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers.sort() + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + unquote(value) + + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + try: + signature = sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.http_request.headers['Authorization'] = auth_string + except Exception as ex: + # Wrap any error that occurred as signing error + # Doing so will clarify/locate the source of problem + raise _wrap_exception(ex, AzureSigningError) + + def on_request(self, request): + string_to_sign = \ + self._get_verb(request) + \ + self._get_headers( + request, + [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/__init__.py new file mode 100644 index 0000000..5b396cd --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/__init__.py @@ -0,0 +1,5 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io.py new file mode 100644 index 0000000..93a5c13 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io.py @@ -0,0 +1,464 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Input/output utilities. + +Includes: + - i/o-specific constants + - i/o-specific exceptions + - schema validation + - leaf value encoding and decoding + - datum reader/writer stuff (?) + +Also includes a generic representation for data, which uses the +following mapping: + - Schema records are implemented as dict. + - Schema arrays are implemented as list. + - Schema maps are implemented as dict. + - Schema strings are implemented as unicode. + - Schema bytes are implemented as str. + - Schema ints are implemented as int. + - Schema longs are implemented as long. + - Schema floats are implemented as float. + - Schema doubles are implemented as float. + - Schema booleans are implemented as bool. +""" + +import json +import logging +import struct +import sys + +from ..avro import schema + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +STRUCT_FLOAT = struct.Struct('<f')  # little-endian float +STRUCT_DOUBLE = struct.Struct('<d')  # little-endian double + + +# ------------------------------------------------------------------------------ +# Exceptions + + +class SchemaResolutionException(schema.AvroException): + def __init__(self, fail_msg, writer_schema=None): + pretty_writers = json.dumps(json.loads(str(writer_schema)), indent=2) + if writer_schema: + fail_msg += "\nWriter's Schema: %s" % pretty_writers + schema.AvroException.__init__(self, fail_msg) + + +# ------------------------------------------------------------------------------ +# Decoder + + +class BinaryDecoder(object): + """Read leaf values.""" + + def __init__(self, reader): + """ + reader is a Python object on which we can call read, seek, and tell. + """ + self._reader = reader + + @property + def reader(self): + """Reports the reader used by this decoder.""" + return self._reader + + def read(self, n): + """Read n bytes. + + Args: + n: Number of bytes to read. + Returns: + The next n bytes from the input. + """ + assert (n >= 0), n + input_bytes = self.reader.read(n) + if n > 0 and not input_bytes: + raise StopIteration + assert (len(input_bytes) == n), input_bytes + return input_bytes + + @staticmethod + def read_null(): + """ + null is written as zero bytes + """ + return None + + def read_boolean(self): + """ + a boolean is written as a single byte + whose value is either 0 (false) or 1 (true). + """ + b = ord(self.read(1)) + if b == 1: + return True + if b == 0: + return False + fail_msg = "Invalid value for boolean: %s" % b + raise schema.AvroException(fail_msg) + + def read_int(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + return self.read_long() + + def read_long(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + b = ord(self.read(1)) + n = b & 0x7F + shift = 7 + while (b & 0x80) != 0: + b = ord(self.read(1)) + n |= (b & 0x7F) << shift + shift += 7 + datum = (n >> 1) ^ -(n & 1) + return datum + + def read_float(self): + """ + A float is written as 4 bytes. + The float is converted into a 32-bit integer using a method equivalent to + Java's floatToIntBits and then encoded in little-endian format. + """ + return STRUCT_FLOAT.unpack(self.read(4))[0] + + def read_double(self): + """ + A double is written as 8 bytes. + The double is converted into a 64-bit integer using a method equivalent to + Java's doubleToLongBits and then encoded in little-endian format. + """ + return STRUCT_DOUBLE.unpack(self.read(8))[0] + + def read_bytes(self): + """ + Bytes are encoded as a long followed by that many bytes of data. + """ + nbytes = self.read_long() + assert (nbytes >= 0), nbytes + return self.read(nbytes) + + def read_utf8(self): + """ + A string is encoded as a long followed by + that many bytes of UTF-8 encoded character data. 
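+
+        A minimal worked example, using io.BytesIO as the underlying reader
+        (0x06 is the zig-zag long 3, followed by three UTF-8 bytes):
+
+        >>> import io
+        >>> decoder = BinaryDecoder(io.BytesIO(b'\x06abc'))
+        >>> decoder.read_utf8()
+        'abc'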
+ """ + input_bytes = self.read_bytes() + if PY3: + try: + return input_bytes.decode('utf-8') + except UnicodeDecodeError as exn: + logger.error('Invalid UTF-8 input bytes: %r', input_bytes) + raise exn + else: + # PY2 + return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable + + def skip_null(self): + pass + + def skip_boolean(self): + self.skip(1) + + def skip_int(self): + self.skip_long() + + def skip_long(self): + b = ord(self.read(1)) + while (b & 0x80) != 0: + b = ord(self.read(1)) + + def skip_float(self): + self.skip(4) + + def skip_double(self): + self.skip(8) + + def skip_bytes(self): + self.skip(self.read_long()) + + def skip_utf8(self): + self.skip_bytes() + + def skip(self, n): + self.reader.seek(self.reader.tell() + n) + + +# ------------------------------------------------------------------------------ +# DatumReader + + +class DatumReader(object): + """Deserialize Avro-encoded data into a Python data structure.""" + + def __init__(self, writer_schema=None): + """ + As defined in the Avro specification, we call the schema encoded + in the data the "writer's schema". + """ + self._writer_schema = writer_schema + + # read/write properties + def set_writer_schema(self, writer_schema): + self._writer_schema = writer_schema + + writer_schema = property(lambda self: self._writer_schema, + set_writer_schema) + + def read(self, decoder): + return self.read_data(self.writer_schema, decoder) + + def read_data(self, writer_schema, decoder): + # function dispatch for reading data based on type of writer's schema + if writer_schema.type == 'null': + result = decoder.read_null() + elif writer_schema.type == 'boolean': + result = decoder.read_boolean() + elif writer_schema.type == 'string': + result = decoder.read_utf8() + elif writer_schema.type == 'int': + result = decoder.read_int() + elif writer_schema.type == 'long': + result = decoder.read_long() + elif writer_schema.type == 'float': + result = decoder.read_float() + elif writer_schema.type == 'double': + result = decoder.read_double() + elif writer_schema.type == 'bytes': + result = decoder.read_bytes() + elif writer_schema.type == 'fixed': + result = self.read_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = self.read_enum(writer_schema, decoder) + elif writer_schema.type == 'array': + result = self.read_array(writer_schema, decoder) + elif writer_schema.type == 'map': + result = self.read_map(writer_schema, decoder) + elif writer_schema.type in ['union', 'error_union']: + result = self.read_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + result = self.read_record(writer_schema, decoder) + else: + fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + def skip_data(self, writer_schema, decoder): + if writer_schema.type == 'null': + result = decoder.skip_null() + elif writer_schema.type == 'boolean': + result = decoder.skip_boolean() + elif writer_schema.type == 'string': + result = decoder.skip_utf8() + elif writer_schema.type == 'int': + result = decoder.skip_int() + elif writer_schema.type == 'long': + result = decoder.skip_long() + elif writer_schema.type == 'float': + result = decoder.skip_float() + elif writer_schema.type == 'double': + result = decoder.skip_double() + elif writer_schema.type == 'bytes': + result = decoder.skip_bytes() + elif writer_schema.type == 'fixed': + result = self.skip_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + 
result = self.skip_enum(decoder) + elif writer_schema.type == 'array': + self.skip_array(writer_schema, decoder) + result = None + elif writer_schema.type == 'map': + self.skip_map(writer_schema, decoder) + result = None + elif writer_schema.type in ['union', 'error_union']: + result = self.skip_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + self.skip_record(writer_schema, decoder) + result = None + else: + fail_msg = "Unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + @staticmethod + def read_fixed(writer_schema, decoder): + """ + Fixed instances are encoded using the number of bytes declared + in the schema. + """ + return decoder.read(writer_schema.size) + + @staticmethod + def skip_fixed(writer_schema, decoder): + return decoder.skip(writer_schema.size) + + @staticmethod + def read_enum(writer_schema, decoder): + """ + An enum is encoded by a int, representing the zero-based position + of the symbol in the schema. + """ + # read data + index_of_symbol = decoder.read_int() + if index_of_symbol >= len(writer_schema.symbols): + fail_msg = "Can't access enum index %d for enum with %d symbols" \ + % (index_of_symbol, len(writer_schema.symbols)) + raise SchemaResolutionException(fail_msg, writer_schema) + read_symbol = writer_schema.symbols[index_of_symbol] + return read_symbol + + @staticmethod + def skip_enum(decoder): + return decoder.skip_int() + + def read_array(self, writer_schema, decoder): + """ + Arrays are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many array items. + A block with count zero indicates the end of the array. + Each item is encoded per the array's item schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = [] + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + decoder.read_long() + for _ in range(block_count): + read_items.append(self.read_data(writer_schema.items, decoder)) + block_count = decoder.read_long() + return read_items + + def skip_array(self, writer_schema, decoder): + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = decoder.read_long() + decoder.skip(block_size) + else: + for _ in range(block_count): + self.skip_data(writer_schema.items, decoder) + block_count = decoder.read_long() + + def read_map(self, writer_schema, decoder): + """ + Maps are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many key/value pairs. + A block with count zero indicates the end of the map. + Each item is encoded per the map's value schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. 
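+
+        For example (illustrative): with string values, the one-entry map
+        {"k": "v"} is encoded as b'\x02\x02k\x02v\x00', that is, block
+        count 1 (0x02), the key "k", the value "v", then the 0x00
+        end-of-map marker.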
+ """ + read_items = {} + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + decoder.read_long() + for _ in range(block_count): + key = decoder.read_utf8() + read_items[key] = self.read_data(writer_schema.values, decoder) + block_count = decoder.read_long() + return read_items + + def skip_map(self, writer_schema, decoder): + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = decoder.read_long() + decoder.skip(block_size) + else: + for _ in range(block_count): + decoder.skip_utf8() + self.skip_data(writer_schema.values, decoder) + block_count = decoder.read_long() + + def read_union(self, writer_schema, decoder): + """ + A union is encoded by first writing a long value indicating + the zero-based position within the union of the schema of its value. + The value is then encoded per the indicated schema within the union. + """ + # schema resolution + index_of_schema = int(decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + selected_writer_schema = writer_schema.schemas[index_of_schema] + + # read data + return self.read_data(selected_writer_schema, decoder) + + def skip_union(self, writer_schema, decoder): + index_of_schema = int(decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + return self.skip_data(writer_schema.schemas[index_of_schema], decoder) + + def read_record(self, writer_schema, decoder): + """ + A record is encoded by encoding the values of its fields + in the order that they are declared. In other words, a record + is encoded as just the concatenation of the encodings of its fields. + Field values are encoded per their schema. + + Schema Resolution: + * the ordering of fields may be different: fields are matched by name. + * schemas for fields with the same name in both records are resolved + recursively. + * if the writer's record contains a field with a name not present in the + reader's record, the writer's value for that field is ignored. + * if the reader's record schema has a field that contains a default value, + and writer's schema does not have a field with the same name, then the + reader should use the default value from its field. + * if the reader's record schema has a field with no default value, and + writer's schema does not have a field with the same name, then the + field's value is unset. 
+ """ + # schema resolution + read_record = {} + for field in writer_schema.fields: + field_val = self.read_data(field.type, decoder) + read_record[field.name] = field_val + return read_record + + def skip_record(self, writer_schema, decoder): + for field in writer_schema.fields: + self.skip_data(field.type, decoder) + + +# ------------------------------------------------------------------------------ + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io_async.py new file mode 100644 index 0000000..e981216 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io_async.py @@ -0,0 +1,448 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Input/output utilities. + +Includes: + - i/o-specific constants + - i/o-specific exceptions + - schema validation + - leaf value encoding and decoding + - datum reader/writer stuff (?) + +Also includes a generic representation for data, which uses the +following mapping: + - Schema records are implemented as dict. + - Schema arrays are implemented as list. + - Schema maps are implemented as dict. + - Schema strings are implemented as unicode. + - Schema bytes are implemented as str. + - Schema ints are implemented as int. + - Schema longs are implemented as long. + - Schema floats are implemented as float. + - Schema doubles are implemented as float. + - Schema booleans are implemented as bool. +""" + +import logging +import sys + +from ..avro import schema + +from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Decoder + + +class AsyncBinaryDecoder(object): + """Read leaf values.""" + + def __init__(self, reader): + """ + reader is a Python object on which we can call read, seek, and tell. + """ + self._reader = reader + + @property + def reader(self): + """Reports the reader used by this decoder.""" + return self._reader + + async def read(self, n): + """Read n bytes. + + Args: + n: Number of bytes to read. + Returns: + The next n bytes from the input. + """ + assert (n >= 0), n + input_bytes = await self.reader.read(n) + if n > 0 and not input_bytes: + raise StopAsyncIteration + assert (len(input_bytes) == n), input_bytes + return input_bytes + + @staticmethod + def read_null(): + """ + null is written as zero bytes + """ + return None + + async def read_boolean(self): + """ + a boolean is written as a single byte + whose value is either 0 (false) or 1 (true). + """ + b = ord(await self.read(1)) + if b == 1: + return True + if b == 0: + return False + fail_msg = "Invalid value for boolean: %s" % b + raise schema.AvroException(fail_msg) + + async def read_int(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + return await self.read_long() + + async def read_long(self): + """ + int and long values are written using variable-length, zig-zag coding. 
+ """ + b = ord(await self.read(1)) + n = b & 0x7F + shift = 7 + while (b & 0x80) != 0: + b = ord(await self.read(1)) + n |= (b & 0x7F) << shift + shift += 7 + datum = (n >> 1) ^ -(n & 1) + return datum + + async def read_float(self): + """ + A float is written as 4 bytes. + The float is converted into a 32-bit integer using a method equivalent to + Java's floatToIntBits and then encoded in little-endian format. + """ + return STRUCT_FLOAT.unpack(await self.read(4))[0] + + async def read_double(self): + """ + A double is written as 8 bytes. + The double is converted into a 64-bit integer using a method equivalent to + Java's doubleToLongBits and then encoded in little-endian format. + """ + return STRUCT_DOUBLE.unpack(await self.read(8))[0] + + async def read_bytes(self): + """ + Bytes are encoded as a long followed by that many bytes of data. + """ + nbytes = await self.read_long() + assert (nbytes >= 0), nbytes + return await self.read(nbytes) + + async def read_utf8(self): + """ + A string is encoded as a long followed by + that many bytes of UTF-8 encoded character data. + """ + input_bytes = await self.read_bytes() + if PY3: + try: + return input_bytes.decode('utf-8') + except UnicodeDecodeError as exn: + logger.error('Invalid UTF-8 input bytes: %r', input_bytes) + raise exn + else: + # PY2 + return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable + + def skip_null(self): + pass + + async def skip_boolean(self): + await self.skip(1) + + async def skip_int(self): + await self.skip_long() + + async def skip_long(self): + b = ord(await self.read(1)) + while (b & 0x80) != 0: + b = ord(await self.read(1)) + + async def skip_float(self): + await self.skip(4) + + async def skip_double(self): + await self.skip(8) + + async def skip_bytes(self): + await self.skip(await self.read_long()) + + async def skip_utf8(self): + await self.skip_bytes() + + async def skip(self, n): + await self.reader.seek(await self.reader.tell() + n) + + +# ------------------------------------------------------------------------------ +# DatumReader + + +class AsyncDatumReader(object): + """Deserialize Avro-encoded data into a Python data structure.""" + + def __init__(self, writer_schema=None): + """ + As defined in the Avro specification, we call the schema encoded + in the data the "writer's schema", and the schema expected by the + reader the "reader's schema". 
+ """ + self._writer_schema = writer_schema + + # read/write properties + def set_writer_schema(self, writer_schema): + self._writer_schema = writer_schema + + writer_schema = property(lambda self: self._writer_schema, + set_writer_schema) + + async def read(self, decoder): + return await self.read_data(self.writer_schema, decoder) + + async def read_data(self, writer_schema, decoder): + # function dispatch for reading data based on type of writer's schema + if writer_schema.type == 'null': + result = decoder.read_null() + elif writer_schema.type == 'boolean': + result = await decoder.read_boolean() + elif writer_schema.type == 'string': + result = await decoder.read_utf8() + elif writer_schema.type == 'int': + result = await decoder.read_int() + elif writer_schema.type == 'long': + result = await decoder.read_long() + elif writer_schema.type == 'float': + result = await decoder.read_float() + elif writer_schema.type == 'double': + result = await decoder.read_double() + elif writer_schema.type == 'bytes': + result = await decoder.read_bytes() + elif writer_schema.type == 'fixed': + result = await self.read_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = await self.read_enum(writer_schema, decoder) + elif writer_schema.type == 'array': + result = await self.read_array(writer_schema, decoder) + elif writer_schema.type == 'map': + result = await self.read_map(writer_schema, decoder) + elif writer_schema.type in ['union', 'error_union']: + result = await self.read_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + result = await self.read_record(writer_schema, decoder) + else: + fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + async def skip_data(self, writer_schema, decoder): + if writer_schema.type == 'null': + result = decoder.skip_null() + elif writer_schema.type == 'boolean': + result = await decoder.skip_boolean() + elif writer_schema.type == 'string': + result = await decoder.skip_utf8() + elif writer_schema.type == 'int': + result = await decoder.skip_int() + elif writer_schema.type == 'long': + result = await decoder.skip_long() + elif writer_schema.type == 'float': + result = await decoder.skip_float() + elif writer_schema.type == 'double': + result = await decoder.skip_double() + elif writer_schema.type == 'bytes': + result = await decoder.skip_bytes() + elif writer_schema.type == 'fixed': + result = await self.skip_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = await self.skip_enum(decoder) + elif writer_schema.type == 'array': + await self.skip_array(writer_schema, decoder) + result = None + elif writer_schema.type == 'map': + await self.skip_map(writer_schema, decoder) + result = None + elif writer_schema.type in ['union', 'error_union']: + result = await self.skip_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + await self.skip_record(writer_schema, decoder) + result = None + else: + fail_msg = "Unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + @staticmethod + async def read_fixed(writer_schema, decoder): + """ + Fixed instances are encoded using the number of bytes declared + in the schema. 
+ """ + return await decoder.read(writer_schema.size) + + @staticmethod + async def skip_fixed(writer_schema, decoder): + return await decoder.skip(writer_schema.size) + + @staticmethod + async def read_enum(writer_schema, decoder): + """ + An enum is encoded by a int, representing the zero-based position + of the symbol in the schema. + """ + # read data + index_of_symbol = await decoder.read_int() + if index_of_symbol >= len(writer_schema.symbols): + fail_msg = "Can't access enum index %d for enum with %d symbols" \ + % (index_of_symbol, len(writer_schema.symbols)) + raise SchemaResolutionException(fail_msg, writer_schema) + read_symbol = writer_schema.symbols[index_of_symbol] + return read_symbol + + @staticmethod + async def skip_enum(decoder): + return await decoder.skip_int() + + async def read_array(self, writer_schema, decoder): + """ + Arrays are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many array items. + A block with count zero indicates the end of the array. + Each item is encoded per the array's item schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = [] + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + await decoder.read_long() + for _ in range(block_count): + read_items.append(await self.read_data(writer_schema.items, decoder)) + block_count = await decoder.read_long() + return read_items + + async def skip_array(self, writer_schema, decoder): + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = await decoder.read_long() + await decoder.skip(block_size) + else: + for _ in range(block_count): + await self.skip_data(writer_schema.items, decoder) + block_count = await decoder.read_long() + + async def read_map(self, writer_schema, decoder): + """ + Maps are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many key/value pairs. + A block with count zero indicates the end of the map. + Each item is encoded per the map's value schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = {} + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + await decoder.read_long() + for _ in range(block_count): + key = await decoder.read_utf8() + read_items[key] = await self.read_data(writer_schema.values, decoder) + block_count = await decoder.read_long() + return read_items + + async def skip_map(self, writer_schema, decoder): + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = await decoder.read_long() + await decoder.skip(block_size) + else: + for _ in range(block_count): + await decoder.skip_utf8() + await self.skip_data(writer_schema.values, decoder) + block_count = await decoder.read_long() + + async def read_union(self, writer_schema, decoder): + """ + A union is encoded by first writing a long value indicating + the zero-based position within the union of the schema of its value. + The value is then encoded per the indicated schema within the union. 
+ """ + # schema resolution + index_of_schema = int(await decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + selected_writer_schema = writer_schema.schemas[index_of_schema] + + # read data + return await self.read_data(selected_writer_schema, decoder) + + async def skip_union(self, writer_schema, decoder): + index_of_schema = int(await decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) + + async def read_record(self, writer_schema, decoder): + """ + A record is encoded by encoding the values of its fields + in the order that they are declared. In other words, a record + is encoded as just the concatenation of the encodings of its fields. + Field values are encoded per their schema. + + Schema Resolution: + * the ordering of fields may be different: fields are matched by name. + * schemas for fields with the same name in both records are resolved + recursively. + * if the writer's record contains a field with a name not present in the + reader's record, the writer's value for that field is ignored. + * if the reader's record schema has a field that contains a default value, + and writer's schema does not have a field with the same name, then the + reader should use the default value from its field. + * if the reader's record schema has a field with no default value, and + writer's schema does not have a field with the same name, then the + field's value is unset. + """ + # schema resolution + read_record = {} + for field in writer_schema.fields: + field_val = await self.read_data(field.type, decoder) + read_record[field.name] = field_val + return read_record + + async def skip_record(self, writer_schema, decoder): + for field in writer_schema.fields: + await self.skip_data(field.type, decoder) + + +# ------------------------------------------------------------------------------ + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile.py new file mode 100644 index 0000000..df06fe0 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile.py @@ -0,0 +1,266 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +"""Read/Write Avro File Object Containers.""" + +import io +import logging +import sys +import zlib + +from ..avro import avro_io +from ..avro import schema + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Version of the container file: +VERSION = 1 + +if PY3: + MAGIC = b'Obj' + bytes([VERSION]) + MAGIC_SIZE = len(MAGIC) +else: + MAGIC = 'Obj' + chr(VERSION) + MAGIC_SIZE = len(MAGIC) + +# Size of the synchronization marker, in number of bytes: +SYNC_SIZE = 16 + +# Schema of the container header: +META_SCHEMA = schema.parse(""" +{ + "type": "record", "name": "org.apache.avro.file.Header", + "fields": [{ + "name": "magic", + "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d} + }, { + "name": "meta", + "type": {"type": "map", "values": "bytes"} + }, { + "name": "sync", + "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d} + }] +} +""" % { + 'magic_size': MAGIC_SIZE, + 'sync_size': SYNC_SIZE, +}) + +# Codecs supported by container files: +VALID_CODECS = frozenset(['null', 'deflate']) + +# Metadata key associated to the schema: +SCHEMA_KEY = "avro.schema" + + +# ------------------------------------------------------------------------------ +# Exceptions + + +class DataFileException(schema.AvroException): + """Problem reading or writing file object containers.""" + +# ------------------------------------------------------------------------------ + + +class DataFileReader(object): # pylint: disable=too-many-instance-attributes + """Read files written by DataFileWriter.""" + + def __init__(self, reader, datum_reader, **kwargs): + """Initializes a new data file reader. + + Args: + reader: Open file to read from. + datum_reader: Avro datum reader. + """ + self._reader = reader + self._raw_decoder = avro_io.BinaryDecoder(reader) + self._header_reader = kwargs.pop('header_reader', None) + self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader) + self._datum_decoder = None # Maybe reset at every block. + self._datum_reader = datum_reader + + # In case self._reader only has partial content(without header). + # seek(0, 0) to make sure read the (partial)content from beginning. + self._reader.seek(0, 0) + + # read the header: magic, meta, sync + self._read_header() + + # ensure codec is valid + avro_codec_raw = self.get_meta('avro.codec') + if avro_codec_raw is None: + self.codec = "null" + else: + self.codec = avro_codec_raw.decode('utf-8') + if self.codec not in VALID_CODECS: + raise DataFileException('Unknown codec: %s.' % self.codec) + + # get ready to read + self._block_count = 0 + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro. + if hasattr(self._reader, 'object_position'): + self.reader.track_object_position() + + self._cur_object_index = 0 + # header_reader indicates reader only has partial content. The reader doesn't have block header, + # so we read use the block count stored last time. + # Also ChangeFeed only has codec==null, so use _raw_decoder is good. 
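+        # (Container layout, per the Avro spec, for orientation: MAGIC, a
+        # metadata map carrying the writer schema under "avro.schema", a
+        # 16-byte sync marker, then repeated data blocks framed as object
+        # count, byte length, data, sync marker.)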
+ if self._header_reader is not None: + self._datum_decoder = self._raw_decoder + + self.datum_reader.writer_schema = ( + schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) + + def __enter__(self): + return self + + def __exit__(self, data_type, value, traceback): + # Perform a close if there's no exception + if data_type is None: + self.close() + + def __iter__(self): + return self + + # read-only properties + @property + def reader(self): + return self._reader + + @property + def raw_decoder(self): + return self._raw_decoder + + @property + def datum_decoder(self): + return self._datum_decoder + + @property + def datum_reader(self): + return self._datum_reader + + @property + def sync_marker(self): + return self._sync_marker + + @property + def meta(self): + return self._meta + + # read/write properties + @property + def block_count(self): + return self._block_count + + def get_meta(self, key): + """Reports the value of a given metadata key. + + Args: + key: Metadata key (string) to report the value of. + Returns: + Value associated to the metadata key, as bytes. + """ + return self._meta.get(key) + + def _read_header(self): + header_reader = self._header_reader if self._header_reader else self._reader + header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder + + # seek to the beginning of the file to get magic block + header_reader.seek(0, 0) + + # read header into a dict + header = self.datum_reader.read_data(META_SCHEMA, header_decoder) + + # check magic number + if header.get('magic') != MAGIC: + fail_msg = "Not an Avro data file: %s doesn't match %s." \ + % (header.get('magic'), MAGIC) + raise schema.AvroException(fail_msg) + + # set metadata + self._meta = header['meta'] + + # set sync marker + self._sync_marker = header['sync'] + + def _read_block_header(self): + self._block_count = self.raw_decoder.read_long() + if self.codec == "null": + # Skip a long; we don't need to use the length. + self.raw_decoder.skip_long() + self._datum_decoder = self._raw_decoder + elif self.codec == 'deflate': + # Compressed data is stored as (length, data), which + # corresponds to how the "bytes" type is encoded. + data = self.raw_decoder.read_bytes() + # -15 is the log of the window size; negative indicates + # "raw" (no zlib headers) decompression. See zlib.h. + uncompressed = zlib.decompress(data, -15) + self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed)) + else: + raise DataFileException("Unknown codec: %r" % self.codec) + + def _skip_sync(self): + """ + Read the length of the sync marker; if it matches the sync marker, + return True. Otherwise, seek back to where we started and return False. + """ + proposed_sync_marker = self.reader.read(SYNC_SIZE) + if SYNC_SIZE > 0 and not proposed_sync_marker: + raise StopIteration + if proposed_sync_marker != self.sync_marker: + self.reader.seek(-SYNC_SIZE, 1) + + def __next__(self): + """Return the next datum in the file.""" + if self.block_count == 0: + self._skip_sync() + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro file with this attr. 
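+        # Iteration sketch (for orientation): each __next__ call yields one
+        # datum; once block_count reaches zero the sync marker is consumed
+        # and the next block header is read, while the object_position hooks
+        # below let a caller checkpoint and resume at the current datum.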
+ if hasattr(self._reader, 'object_position'): + self.reader.track_object_position() + self._cur_object_index = 0 + + self._read_block_header() + + datum = self.datum_reader.read(self.datum_decoder) + self._block_count -= 1 + self._cur_object_index += 1 + + # object_position is to support reading from current position in the future read, + # This will track the index of the next item to be read. + # This will also track the offset before the next sync marker. + if hasattr(self._reader, 'object_position'): + if self.block_count == 0: + # the next event to be read is at index 0 in the new chunk of blocks, + self.reader.track_object_position() + self.reader.set_object_index(0) + else: + self.reader.set_object_index(self._cur_object_index) + + return datum + + # PY2 + def next(self): + return self.__next__() + + def close(self): + """Close this reader.""" + self.reader.close() + + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile_async.py new file mode 100644 index 0000000..1e9d018 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile_async.py @@ -0,0 +1,215 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Read/Write Avro File Object Containers.""" + +import logging +import sys + +from ..avro import avro_io_async +from ..avro import schema +from .datafile import DataFileException +from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY + + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Codecs supported by container files: +VALID_CODECS = frozenset(['null']) + + +class AsyncDataFileReader(object): # pylint: disable=too-many-instance-attributes + """Read files written by DataFileWriter.""" + + def __init__(self, reader, datum_reader, **kwargs): + """Initializes a new data file reader. + + Args: + reader: Open file to read from. + datum_reader: Avro datum reader. + """ + self._reader = reader + self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader) + self._header_reader = kwargs.pop('header_reader', None) + self._header_decoder = None if self._header_reader is None else \ + avro_io_async.AsyncBinaryDecoder(self._header_reader) + self._datum_decoder = None # Maybe reset at every block. + self._datum_reader = datum_reader + self.codec = "null" + self._block_count = 0 + self._cur_object_index = 0 + self._meta = None + self._sync_marker = None + + async def init(self): + # In case self._reader only has partial content(without header). + # seek(0, 0) to make sure read the (partial)content from beginning. + await self._reader.seek(0, 0) + + # read the header: magic, meta, sync + await self._read_header() + + # ensure codec is valid + avro_codec_raw = self.get_meta('avro.codec') + if avro_codec_raw is None: + self.codec = "null" + else: + self.codec = avro_codec_raw.decode('utf-8') + if self.codec not in VALID_CODECS: + raise DataFileException('Unknown codec: %s.' 
% self.codec) + + # get ready to read + self._block_count = 0 + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro. + if hasattr(self._reader, 'object_position'): + self.reader.track_object_position() + + # header_reader indicates reader only has partial content. The reader doesn't have block header, + # so we read use the block count stored last time. + # Also ChangeFeed only has codec==null, so use _raw_decoder is good. + if self._header_reader is not None: + self._datum_decoder = self._raw_decoder + self.datum_reader.writer_schema = ( + schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) + return self + + async def __aenter__(self): + return self + + async def __aexit__(self, data_type, value, traceback): + # Perform a close if there's no exception + if data_type is None: + self.close() + + def __aiter__(self): + return self + + # read-only properties + @property + def reader(self): + return self._reader + + @property + def raw_decoder(self): + return self._raw_decoder + + @property + def datum_decoder(self): + return self._datum_decoder + + @property + def datum_reader(self): + return self._datum_reader + + @property + def sync_marker(self): + return self._sync_marker + + @property + def meta(self): + return self._meta + + # read/write properties + @property + def block_count(self): + return self._block_count + + def get_meta(self, key): + """Reports the value of a given metadata key. + + Args: + key: Metadata key (string) to report the value of. + Returns: + Value associated to the metadata key, as bytes. + """ + return self._meta.get(key) + + async def _read_header(self): + header_reader = self._header_reader if self._header_reader else self._reader + header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder + + # seek to the beginning of the file to get magic block + await header_reader.seek(0, 0) + + # read header into a dict + header = await self.datum_reader.read_data(META_SCHEMA, header_decoder) + + # check magic number + if header.get('magic') != MAGIC: + fail_msg = "Not an Avro data file: %s doesn't match %s." \ + % (header.get('magic'), MAGIC) + raise schema.AvroException(fail_msg) + + # set metadata + self._meta = header['meta'] + + # set sync marker + self._sync_marker = header['sync'] + + async def _read_block_header(self): + self._block_count = await self.raw_decoder.read_long() + if self.codec == "null": + # Skip a long; we don't need to use the length. + await self.raw_decoder.skip_long() + self._datum_decoder = self._raw_decoder + else: + raise DataFileException("Unknown codec: %r" % self.codec) + + async def _skip_sync(self): + """ + Read the length of the sync marker; if it matches the sync marker, + return True. Otherwise, seek back to where we started and return False. + """ + proposed_sync_marker = await self.reader.read(SYNC_SIZE) + if SYNC_SIZE > 0 and not proposed_sync_marker: + raise StopAsyncIteration + if proposed_sync_marker != self.sync_marker: + await self.reader.seek(-SYNC_SIZE, 1) + + async def __anext__(self): + """Return the next datum in the file.""" + if self.block_count == 0: + await self._skip_sync() + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro file with this attr. 
+ if hasattr(self._reader, 'object_position'): + await self.reader.track_object_position() + self._cur_object_index = 0 + + await self._read_block_header() + + datum = await self.datum_reader.read(self.datum_decoder) + self._block_count -= 1 + self._cur_object_index += 1 + + # object_position is to support reading from current position in the future read, + # This will track the index of the next item to be read. + # This will also track the offset before the next sync marker. + if hasattr(self._reader, 'object_position'): + if self.block_count == 0: + # the next event to be read is at index 0 in the new chunk of blocks, + await self.reader.track_object_position() + await self.reader.set_object_index(0) + else: + await self.reader.set_object_index(self._cur_object_index) + + return datum + + def close(self): + """Close this reader.""" + self.reader.close() + + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/schema.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/schema.py new file mode 100644 index 0000000..ffe2853 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/schema.py @@ -0,0 +1,1221 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines + +"""Representation of Avro schemas. + +A schema may be one of: + - A record, mapping field names to field value data; + - An error, equivalent to a record; + - An enum, containing one of a small set of symbols; + - An array of values, all of the same schema; + - A map containing string/value pairs, each of a declared schema; + - A union of other schemas; + - A fixed sized binary object; + - A unicode string; + - A sequence of bytes; + - A 32-bit signed int; + - A 64-bit signed long; + - A 32-bit floating-point float; + - A 64-bit floating-point double; + - A boolean; + - Null. +""" + +import abc +import json +import logging +import re +import sys +from six import with_metaclass + +PY2 = sys.version_info[0] == 2 + +if PY2: + _str = unicode # pylint: disable=undefined-variable +else: + _str = str + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Log level more verbose than DEBUG=10, INFO=20, etc. 
+DEBUG_VERBOSE = 5 + +NULL = 'null' +BOOLEAN = 'boolean' +STRING = 'string' +BYTES = 'bytes' +INT = 'int' +LONG = 'long' +FLOAT = 'float' +DOUBLE = 'double' +FIXED = 'fixed' +ENUM = 'enum' +RECORD = 'record' +ERROR = 'error' +ARRAY = 'array' +MAP = 'map' +UNION = 'union' + +# Request and error unions are part of Avro protocols: +REQUEST = 'request' +ERROR_UNION = 'error_union' + +PRIMITIVE_TYPES = frozenset([ + NULL, + BOOLEAN, + STRING, + BYTES, + INT, + LONG, + FLOAT, + DOUBLE, +]) + +NAMED_TYPES = frozenset([ + FIXED, + ENUM, + RECORD, + ERROR, +]) + +VALID_TYPES = frozenset.union( + PRIMITIVE_TYPES, + NAMED_TYPES, + [ + ARRAY, + MAP, + UNION, + REQUEST, + ERROR_UNION, + ], +) + +SCHEMA_RESERVED_PROPS = frozenset([ + 'type', + 'name', + 'namespace', + 'fields', # Record + 'items', # Array + 'size', # Fixed + 'symbols', # Enum + 'values', # Map + 'doc', +]) + +FIELD_RESERVED_PROPS = frozenset([ + 'default', + 'name', + 'doc', + 'order', + 'type', +]) + +VALID_FIELD_SORT_ORDERS = frozenset([ + 'ascending', + 'descending', + 'ignore', +]) + + +# ------------------------------------------------------------------------------ +# Exceptions + + +class Error(Exception): + """Base class for errors in this module.""" + + +class AvroException(Error): + """Generic Avro schema error.""" + + +class SchemaParseException(AvroException): + """Error while parsing a JSON schema descriptor.""" + + +class Schema(with_metaclass(abc.ABCMeta, object)): + """Abstract base class for all Schema classes.""" + + def __init__(self, data_type, other_props=None): + """Initializes a new schema object. + + Args: + data_type: Type of the schema to initialize. + other_props: Optional dictionary of additional properties. + """ + if data_type not in VALID_TYPES: + raise SchemaParseException('%r is not a valid Avro type.' % data_type) + + # All properties of this schema, as a map: property name -> property value + self._props = {} + + self._props['type'] = data_type + self._type = data_type + + if other_props: + self._props.update(other_props) + + @property + def namespace(self): + """Returns: the namespace this schema belongs to, if any, or None.""" + return self._props.get('namespace', None) + + @property + def type(self): + """Returns: the type of this schema.""" + return self._type + + @property + def doc(self): + """Returns: the documentation associated to this schema, if any, or None.""" + return self._props.get('doc', None) + + @property + def props(self): + """Reports all the properties of this schema. + + Includes all properties, reserved and non reserved. + JSON properties of this schema are directly generated from this dict. + + Returns: + A dictionary of properties associated to this schema. + """ + return self._props + + @property + def other_props(self): + """Returns: the dictionary of non-reserved properties.""" + return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) + + def __str__(self): + """Returns: the JSON representation of this schema.""" + return json.dumps(self.to_json(names=None)) + + @abc.abstractmethod + def to_json(self, names): + """Converts the schema object into its AVRO specification representation. + + Schema types that have names (records, enums, and fixed) must + be aware of not re-defining schemas that are already listed + in the parameter names. 
+ """ + raise Exception('Cannot run abstract method.') + + +# ------------------------------------------------------------------------------ + + +_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') + +_RE_FULL_NAME = re.compile( + r'^' + r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace + r'([A-Za-z_][A-Za-z0-9_]*)' # name + r'$' +) + + +class Name(object): + """Representation of an Avro name.""" + + def __init__(self, name, namespace=None): + """Parses an Avro name. + + Args: + name: Avro name to parse (relative or absolute). + namespace: Optional explicit namespace if the name is relative. + """ + # Normalize: namespace is always defined as a string, possibly empty. + if namespace is None: + namespace = '' + + if '.' in name: + # name is absolute, namespace is ignored: + self._fullname = name + + match = _RE_FULL_NAME.match(self._fullname) + if match is None: + raise SchemaParseException( + 'Invalid absolute schema name: %r.' % self._fullname) + + self._name = match.group(1) + self._namespace = self._fullname[:-(len(self._name) + 1)] + + else: + # name is relative, combine with explicit namespace: + self._name = name + self._namespace = namespace + self._fullname = (self._name + if (not self._namespace) else + '%s.%s' % (self._namespace, self._name)) + + # Validate the fullname: + if _RE_FULL_NAME.match(self._fullname) is None: + raise SchemaParseException( + 'Invalid schema name %r infered from name %r and namespace %r.' + % (self._fullname, self._name, self._namespace)) + + def __eq__(self, other): + if not isinstance(other, Name): + return NotImplemented + return self.fullname == other.fullname + + @property + def simple_name(self): + """Returns: the simple name part of this name.""" + return self._name + + @property + def namespace(self): + """Returns: this name's namespace, possible the empty string.""" + return self._namespace + + @property + def fullname(self): + """Returns: the full name.""" + return self._fullname + + +# ------------------------------------------------------------------------------ + + +class Names(object): + """Tracks Avro named schemas and default namespace during parsing.""" + + def __init__(self, default_namespace=None, names=None): + """Initializes a new name tracker. + + Args: + default_namespace: Optional default namespace. + names: Optional initial mapping of known named schemas. + """ + if names is None: + names = {} + self._names = names + self._default_namespace = default_namespace + + @property + def names(self): + """Returns: the mapping of known named schemas.""" + return self._names + + @property + def default_namespace(self): + """Returns: the default namespace, if any, or None.""" + return self._default_namespace + + def new_with_default_namespace(self, namespace): + """Creates a new name tracker from this tracker, but with a new default ns. + + Args: + namespace: New default namespace to use. + Returns: + New name tracker with the specified default namespace. + """ + return Names(names=self._names, default_namespace=namespace) + + def get_name(self, name, namespace=None): + """Resolves the Avro name according to this name tracker's state. + + Args: + name: Name to resolve (absolute or relative). + namespace: Optional explicit namespace. + Returns: + The specified name, resolved according to this tracker. + """ + if namespace is None: + namespace = self._default_namespace + return Name(name=name, namespace=namespace) + + def get_schema(self, name, namespace=None): + """Resolves an Avro schema by name. 
+
+        Args:
+            name: Name (relative or absolute) of the Avro schema to look up.
+            namespace: Optional explicit namespace.
+        Returns:
+            The schema with the specified name, if any, or None.
+        """
+        avro_name = self.get_name(name=name, namespace=namespace)
+        return self._names.get(avro_name.fullname, None)
+
+    def prune_namespace(self, properties):
+        """Returns the given properties with the 'namespace' entry removed
+        if it matches this tracker's default namespace.
+        """
+        if self.default_namespace is None:
+            # This tracker has no default namespace -- no change:
+            return properties
+        if 'namespace' not in properties:
+            # The properties carry no namespace -- no change:
+            return properties
+        if properties['namespace'] != self.default_namespace:
+            # The namespaces differ -- leave the properties untouched:
+            return properties
+        # The property namespace is redundant with the default one: remove it.
+        prunable = properties.copy()
+        del prunable['namespace']
+        return prunable
+
+    def register(self, schema):
+        """Registers a new named schema in this tracker.
+
+        Args:
+            schema: Named Avro schema to register in this tracker.
+        """
+        if schema.fullname in VALID_TYPES:
+            raise SchemaParseException(
+                '%s is a reserved type name.' % schema.fullname)
+        if schema.fullname in self.names:
+            raise SchemaParseException(
+                'Avro name %r already exists.' % schema.fullname)
+
+        logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname)
+        self._names[schema.fullname] = schema
+
+
+# ------------------------------------------------------------------------------
+
+
+class NamedSchema(Schema):
+    """Abstract base class for named schemas.
+
+    Named schemas are enumerated in NAMED_TYPES.
+    """
+
+    def __init__(
+            self,
+            data_type,
+            name=None,
+            namespace=None,
+            names=None,
+            other_props=None,
+    ):
+        """Initializes a new named schema object.
+
+        Args:
+            data_type: Type of the named schema.
+            name: Name (absolute or relative) of the schema.
+            namespace: Optional explicit namespace if name is relative.
+            names: Tracker to resolve and register Avro names.
+            other_props: Optional map of additional properties of the schema.
+        """
+        assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type)
+        self._avro_name = names.get_name(name=name, namespace=namespace)
+
+        super(NamedSchema, self).__init__(data_type, other_props)
+
+        names.register(self)
+
+        self._props['name'] = self.name
+        if self.namespace:
+            self._props['namespace'] = self.namespace
+
+    @property
+    def avro_name(self):
+        """Returns: the Name object describing this schema's name."""
+        return self._avro_name
+
+    @property
+    def name(self):
+        return self._avro_name.simple_name
+
+    @property
+    def namespace(self):
+        return self._avro_name.namespace
+
+    @property
+    def fullname(self):
+        return self._avro_name.fullname
+
+    def name_ref(self, names):
+        """Reports this schema name relative to the specified name tracker.
+
+        Args:
+            names: Avro name tracker to relativise this schema name against.
+        Returns:
+            This schema name, relativised against the specified name tracker.
+        """
+        if self.namespace == names.default_namespace:
+            return self.name
+        return self.fullname
+
+    @abc.abstractmethod
+    def to_json(self, names):
+        """Converts the schema object into its AVRO specification representation.
+
+        Schema types that have names (records, enums, and fixed) must
+        be aware of not re-defining schemas that are already listed
+        in the parameter names.
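+
+        Args:
+            names: Tracker of the named schemas already serialized, so that
+                named types are emitted once and referenced by name afterwards.
+        Returns:
+            The JSON representation of this schema, as JSON-compatible data.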
+ """ + raise Exception('Cannot run abstract method.') + +# ------------------------------------------------------------------------------ + + +_NO_DEFAULT = object() + + +class Field(object): + """Representation of the schema of a field in a record.""" + + def __init__( + self, + data_type, + name, + index, + has_default, + default=_NO_DEFAULT, + order=None, + doc=None, + other_props=None + ): + """Initializes a new Field object. + + Args: + data_type: Avro schema of the field. + name: Name of the field. + index: 0-based position of the field. + has_default: + default: + order: + doc: + other_props: + """ + if (not isinstance(name, _str)) or (not name): + raise SchemaParseException('Invalid record field name: %r.' % name) + if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): + raise SchemaParseException('Invalid record field order: %r.' % order) + + # All properties of this record field: + self._props = {} + + self._has_default = has_default + if other_props: + self._props.update(other_props) + + self._index = index + self._type = self._props['type'] = data_type + self._name = self._props['name'] = name + + if has_default: + self._props['default'] = default + + if order is not None: + self._props['order'] = order + + if doc is not None: + self._props['doc'] = doc + + @property + def type(self): + """Returns: the schema of this field.""" + return self._type + + @property + def name(self): + """Returns: this field name.""" + return self._name + + @property + def index(self): + """Returns: the 0-based index of this field in the record.""" + return self._index + + @property + def default(self): + return self._props['default'] + + @property + def has_default(self): + return self._has_default + + @property + def order(self): + return self._props.get('order', None) + + @property + def doc(self): + return self._props.get('doc', None) + + @property + def props(self): + return self._props + + @property + def other_props(self): + return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) + + def __str__(self): + return json.dumps(self.to_json()) + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + to_dump['type'] = self.type.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ +# Primitive Types + + +class PrimitiveSchema(Schema): + """Schema of a primitive Avro type. + + Valid primitive types are defined in PRIMITIVE_TYPES. + """ + + def __init__(self, data_type, other_props=None): + """Initializes a new schema object for the specified primitive type. + + Args: + data_type: Type of the schema to construct. Must be primitive. + """ + if data_type not in PRIMITIVE_TYPES: + raise AvroException('%r is not a valid primitive type.' % data_type) + super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) + + @property + def name(self): + """Returns: the simple name of this schema.""" + # The name of a primitive type is the type itself. + return self.type + + @property + def fullname(self): + """Returns: the fully qualified name of this schema.""" + # The full name is the simple name for primitive schema. 
+        return self.name
+
+    def to_json(self, names=None):
+        if len(self.props) == 1:
+            return self.fullname
+        return self.props
+
+    def __eq__(self, that):
+        return self.props == that.props
+
+
+# ------------------------------------------------------------------------------
+# Complex Types (non-recursive)
+
+
+class FixedSchema(NamedSchema):
+    def __init__(
+            self,
+            name,
+            namespace,
+            size,
+            names=None,
+            other_props=None,
+    ):
+        # Ensure valid ctor args
+        if not isinstance(size, int):
+            fail_msg = 'Fixed Schema requires a valid integer for size property.'
+            raise AvroException(fail_msg)
+
+        super(FixedSchema, self).__init__(
+            data_type=FIXED,
+            name=name,
+            namespace=namespace,
+            names=names,
+            other_props=other_props,
+        )
+        self._props['size'] = size
+
+    @property
+    def size(self):
+        """Returns: the size of this fixed schema, in bytes."""
+        return self._props['size']
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        if self.fullname in names.names:
+            return self.name_ref(names)
+        names.names[self.fullname] = self
+        return names.prune_namespace(self.props)
+
+    def __eq__(self, that):
+        return self.props == that.props
+
+
+# ------------------------------------------------------------------------------
+
+
+class EnumSchema(NamedSchema):
+    def __init__(
+            self,
+            name,
+            namespace,
+            symbols,
+            names=None,
+            doc=None,
+            other_props=None,
+    ):
+        """Initializes a new enumeration schema object.
+
+        Args:
+            name: Simple name of this enumeration.
+            namespace: Optional namespace.
+            symbols: Ordered list of symbols defined in this enumeration.
+            names: Tracker to resolve and register Avro names.
+            doc: Optional documentation string for the enumeration.
+            other_props: Optional map of additional properties of the schema.
+        """
+        symbols = tuple(symbols)
+        symbol_set = frozenset(symbols)
+        if (len(symbol_set) != len(symbols)
+                or not all(map(lambda symbol: isinstance(symbol, _str), symbols))):
+            raise AvroException(
+                'Invalid symbols for enum schema: %r.' % (symbols,))
+
+        super(EnumSchema, self).__init__(
+            data_type=ENUM,
+            name=name,
+            namespace=namespace,
+            names=names,
+            other_props=other_props,
+        )
+
+        self._props['symbols'] = symbols
+        if doc is not None:
+            self._props['doc'] = doc
+
+    @property
+    def symbols(self):
+        """Returns: the symbols defined in this enum."""
+        return self._props['symbols']
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        if self.fullname in names.names:
+            return self.name_ref(names)
+        names.names[self.fullname] = self
+        return names.prune_namespace(self.props)
+
+    def __eq__(self, that):
+        return self.props == that.props
+
+
+# ------------------------------------------------------------------------------
+# Complex Types (recursive)
+
+
+class ArraySchema(Schema):
+    """Schema of an array."""
+
+    def __init__(self, items, other_props=None):
+        """Initializes a new array schema object.
+
+        Args:
+            items: Avro schema of the array items.
+            other_props: Optional map of additional properties of the schema.
+        """
+        super(ArraySchema, self).__init__(
+            data_type=ARRAY,
+            other_props=other_props,
+        )
+        self._items_schema = items
+        self._props['items'] = items
+
+    @property
+    def items(self):
+        """Returns: the schema of the items in this array."""
+        return self._items_schema
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = self.props.copy()
+        item_schema = self.items
+        to_dump['items'] = item_schema.to_json(names)
+        return to_dump
+
+    def __eq__(self, that):
+        to_cmp = json.loads(_str(self))
+        return to_cmp == json.loads(_str(that))
+
+
+# ------------------------------------------------------------------------------
+
+
+class MapSchema(Schema):
+    """Schema of a map."""
+
+    def __init__(self, values, other_props=None):
+        """Initializes a new map schema object.
+
+        Args:
+            values: Avro schema of the map values.
+            other_props: Optional map of additional properties of the schema.
+        """
+        super(MapSchema, self).__init__(
+            data_type=MAP,
+            other_props=other_props,
+        )
+        self._values_schema = values
+        self._props['values'] = values
+
+    @property
+    def values(self):
+        """Returns: the schema of the values in this map."""
+        return self._values_schema
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = self.props.copy()
+        to_dump['values'] = self.values.to_json(names)
+        return to_dump
+
+    def __eq__(self, that):
+        to_cmp = json.loads(_str(self))
+        return to_cmp == json.loads(_str(that))
+
+
+# ------------------------------------------------------------------------------
+
+
+class UnionSchema(Schema):
+    """Schema of a union."""
+
+    def __init__(self, schemas):
+        """Initializes a new union schema object.
+
+        Args:
+            schemas: Ordered collection of schema branches in the union.
+        """
+        super(UnionSchema, self).__init__(data_type=UNION)
+        self._schemas = tuple(schemas)
+
+        # Validate the schema branches:
+
+        # All named schema names are unique:
+        named_branches = tuple(
+            filter(lambda schema: schema.type in NAMED_TYPES, self._schemas))
+        unique_names = frozenset(map(lambda schema: schema.fullname, named_branches))
+        if len(unique_names) != len(named_branches):
+            raise AvroException(
+                'Invalid union branches with duplicate schema name: %s'
+                % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas)))
+
+        # Types are unique within unnamed schemas, and union is not allowed:
+        unnamed_branches = tuple(
+            filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas))
+        unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches))
+        if UNION in unique_types:
+            raise AvroException(
+                'Invalid union branches contain other unions: %s'
+                % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas)))
+        if len(unique_types) != len(unnamed_branches):
+            raise AvroException(
+                'Invalid union branches with duplicate type: %s'
+                % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas)))
+
+    @property
+    def schemas(self):
+        """Returns: the ordered list of schema branches in the union."""
+        return self._schemas
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = []
+        for schema in self.schemas:
+            to_dump.append(schema.to_json(names))
+        return to_dump
+
+    def __eq__(self, that):
+        to_cmp = json.loads(_str(self))
+        return to_cmp == json.loads(_str(that))
+
+
+# ------------------------------------------------------------------------------
+
+
+class ErrorUnionSchema(UnionSchema):
+    """Schema representing the declared errors of a protocol message."""
+
+    def __init__(self, schemas):
+        """Initializes an error-union schema.
+
+        Args:
+            schemas: Collection of error schemas.
+        """
+        # Prepend "string" to handle system errors
+        schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas)
+        super(ErrorUnionSchema, self).__init__(schemas=schemas)
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = []
+        for schema in self.schemas:
+            # Don't print the system error schema
+            if schema.type == STRING:
+                continue
+            to_dump.append(schema.to_json(names))
+        return to_dump
+
+
+# ------------------------------------------------------------------------------
+
+
+class RecordSchema(NamedSchema):
+    """Schema of a record."""
+
+    @staticmethod
+    def _make_field(index, field_desc, names):
+        """Builds a single field schema from its JSON descriptor.
+
+        Args:
+            index: 0-based index of the field in the record.
+            field_desc: JSON descriptor of the record field.
+            names: Avro schema tracker.
+        Returns:
+            The field schema.
+        """
+        field_schema = schema_from_json_data(
+            json_data=field_desc['type'],
+            names=names,
+        )
+        other_props = (
+            dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))
+        return Field(
+            data_type=field_schema,
+            name=field_desc['name'],
+            index=index,
+            has_default=('default' in field_desc),
+            default=field_desc.get('default', _NO_DEFAULT),
+            order=field_desc.get('order', None),
+            doc=field_desc.get('doc', None),
+            other_props=other_props,
+        )
+
+    @staticmethod
+    def make_field_list(field_desc_list, names):
+        """Builds field schemas from a list of field JSON descriptors.
+
+        Args:
+            field_desc_list: Collection of field JSON descriptors.
+            names: Avro schema tracker.
+        Yields:
+            Field schemas.
+        """
+        for index, field_desc in enumerate(field_desc_list):
+            yield RecordSchema._make_field(index, field_desc, names)
+
+    @staticmethod
+    def _make_field_map(fields):
+        """Builds the field map.
+
+        Guarantees field name uniqueness.
+
+        Args:
+            fields: Iterable of field schemas.
+        Returns:
+            A map of field schemas, indexed by name.
+        """
+        field_map = {}
+        for field in fields:
+            if field.name in field_map:
+                raise SchemaParseException(
+                    'Duplicate record field name %r.' % field.name)
+            field_map[field.name] = field
+        return field_map
+
+    def __init__(
+            self,
+            name,
+            namespace,
+            fields=None,
+            make_fields=None,
+            names=None,
+            record_type=RECORD,
+            doc=None,
+            other_props=None
+    ):
+        """Initializes a new record schema object.
+
+        Args:
+            name: Name of the record (absolute or relative).
+            namespace: Optional namespace the record belongs to, if name is relative.
+            fields: Collection of fields to add to this record.
+                Exactly one of fields or make_fields must be specified.
+            make_fields: Function creating the fields that belong to the record.
+                The function signature is: make_fields(names) -> ordered field list.
+                Exactly one of fields or make_fields must be specified.
+            names: Tracker to resolve and register Avro names.
+            record_type: Type of the record: one of RECORD, ERROR or REQUEST.
+                Protocol requests are not named.
+            doc: Optional documentation string for the record.
+            other_props: Optional map of additional properties of the schema.
+        """
+        if record_type == REQUEST:
+            # Protocol requests are not named:
+            super(RecordSchema, self).__init__(
+                data_type=REQUEST,
+                other_props=other_props,
+            )
+        elif record_type in [RECORD, ERROR]:
+            # Register this record name in the tracker:
+            super(RecordSchema, self).__init__(
+                data_type=record_type,
+                name=name,
+                namespace=namespace,
+                names=names,
+                other_props=other_props,
+            )
+        else:
+            raise SchemaParseException(
+                'Invalid record type: %r.'
+                % record_type)
+
+        if record_type in [RECORD, ERROR]:
+            avro_name = names.get_name(name=name, namespace=namespace)
+            nested_names = names.new_with_default_namespace(namespace=avro_name.namespace)
+        elif record_type == REQUEST:
+            # Protocol request has no name: no need to change default namespace:
+            nested_names = names
+
+        if fields is None:
+            fields = make_fields(names=nested_names)
+        else:
+            assert make_fields is None
+        self._fields = tuple(fields)
+
+        self._field_map = RecordSchema._make_field_map(self._fields)
+
+        self._props['fields'] = fields
+        if doc is not None:
+            self._props['doc'] = doc
+
+    @property
+    def fields(self):
+        """Returns: the field schemas, as an ordered tuple."""
+        return self._fields
+
+    @property
+    def field_map(self):
+        """Returns: a read-only map of the field schemas, indexed by field name."""
+        return self._field_map
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        # Request records don't have names
+        if self.type == REQUEST:
+            return [f.to_json(names) for f in self.fields]
+
+        if self.fullname in names.names:
+            return self.name_ref(names)
+        names.names[self.fullname] = self
+
+        to_dump = names.prune_namespace(self.props.copy())
+        to_dump['fields'] = [f.to_json(names) for f in self.fields]
+        return to_dump
+
+    def __eq__(self, that):
+        to_cmp = json.loads(_str(self))
+        return to_cmp == json.loads(_str(that))
+
+
+# ------------------------------------------------------------------------------
+# Module functions
+
+
+def filter_keys_out(items, keys):
+    """Filters a collection of (key, value) items.
+
+    Excludes any item whose key belongs to keys.
+
+    Args:
+        items: Dictionary of items to filter the keys out of.
+        keys: Keys to filter out.
+    Yields:
+        Filtered items.
+    """
+    for key, value in items.items():
+        if key in keys:
+            continue
+        yield key, value
+
+
+# ------------------------------------------------------------------------------
+
+
+def _schema_from_json_string(json_string, names):
+    if json_string in PRIMITIVE_TYPES:
+        return PrimitiveSchema(data_type=json_string)
+
+    # Look for a known named schema:
+    schema = names.get_schema(name=json_string)
+    if schema is None:
+        raise SchemaParseException(
+            'Unknown named schema %r, known names: %r.'
+ % (json_string, sorted(names.names))) + return schema + + +def _schema_from_json_array(json_array, names): + def MakeSchema(desc): + return schema_from_json_data(json_data=desc, names=names) + + return UnionSchema(map(MakeSchema, json_array)) + + +def _schema_from_json_object(json_object, names): + data_type = json_object.get('type') + if data_type is None: + raise SchemaParseException( + 'Avro schema JSON descriptor has no "type" property: %r' % json_object) + + other_props = dict( + filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) + + if data_type in PRIMITIVE_TYPES: + # FIXME should not ignore other properties + result = PrimitiveSchema(data_type, other_props=other_props) + + elif data_type in NAMED_TYPES: + name = json_object.get('name') + namespace = json_object.get('namespace', names.default_namespace) + if data_type == FIXED: + size = json_object.get('size') + result = FixedSchema(name, namespace, size, names, other_props) + elif data_type == ENUM: + symbols = json_object.get('symbols') + doc = json_object.get('doc') + result = EnumSchema(name, namespace, symbols, names, doc, other_props) + + elif data_type in [RECORD, ERROR]: + field_desc_list = json_object.get('fields', ()) + + def MakeFields(names): + return tuple(RecordSchema.make_field_list(field_desc_list, names)) + + result = RecordSchema( + name=name, + namespace=namespace, + make_fields=MakeFields, + names=names, + record_type=data_type, + doc=json_object.get('doc'), + other_props=other_props, + ) + else: + raise Exception('Internal error: unknown type %r.' % data_type) + + elif data_type in VALID_TYPES: + # Unnamed, non-primitive Avro type: + + if data_type == ARRAY: + items_desc = json_object.get('items') + if items_desc is None: + raise SchemaParseException( + 'Invalid array schema descriptor with no "items" : %r.' + % json_object) + result = ArraySchema( + items=schema_from_json_data(items_desc, names), + other_props=other_props, + ) + + elif data_type == MAP: + values_desc = json_object.get('values') + if values_desc is None: + raise SchemaParseException( + 'Invalid map schema descriptor with no "values" : %r.' + % json_object) + result = MapSchema( + values=schema_from_json_data(values_desc, names=names), + other_props=other_props, + ) + + elif data_type == ERROR_UNION: + error_desc_list = json_object.get('declared_errors') + assert error_desc_list is not None + error_schemas = map( + lambda desc: schema_from_json_data(desc, names=names), + error_desc_list) + result = ErrorUnionSchema(schemas=error_schemas) + + else: + raise Exception('Internal error: unknown type %r.' % data_type) + else: + raise SchemaParseException( + 'Invalid JSON descriptor for an Avro schema: %r' % json_object) + return result + + +# Parsers for the JSON data types: +_JSONDataParserTypeMap = { + _str: _schema_from_json_string, + list: _schema_from_json_array, + dict: _schema_from_json_object, +} + + +def schema_from_json_data(json_data, names=None): + """Builds an Avro Schema from its JSON descriptor. + + Args: + json_data: JSON data representing the descriptor of the Avro schema. + names: Optional tracker for Avro named schemas. + Returns: + The Avro schema parsed from the JSON descriptor. + Raises: + SchemaParseException: if the descriptor is invalid. + """ + if names is None: + names = Names() + + # Select the appropriate parser based on the JSON data type: + parser = _JSONDataParserTypeMap.get(type(json_data)) + if parser is None: + raise SchemaParseException( + 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) + return parser(json_data, names=names) + + +# ------------------------------------------------------------------------------ + + +def parse(json_string): + """Constructs a Schema from its JSON descriptor in text form. + + Args: + json_string: String representation of the JSON descriptor of the schema. + Returns: + The parsed schema. + Raises: + SchemaParseException: on JSON parsing error, + or if the JSON descriptor is invalid. + """ + try: + json_data = json.loads(json_string) + except Exception as exn: + raise SchemaParseException( + 'Error parsing schema from JSON: %r. ' + 'Error message: %r.' + % (json_string, exn)) + + # Initialize the names object + names = Names() + + # construct the Avro Schema object + return schema_from_json_data(json_data, names) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client.py new file mode 100644 index 0000000..361931a --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client.py @@ -0,0 +1,443 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, + Optional, + Any, + Iterable, + Dict, + List, + Type, + Tuple, + TYPE_CHECKING, +) +import logging + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +import six + +from azure.core.configuration import Configuration +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + RedirectPolicy, + ContentDecodePolicy, + BearerTokenCredentialPolicy, + ProxyPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + UserAgentPolicy +) + +from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .policies import ( + StorageHeadersPolicy, + StorageContentValidation, + StorageRequestHook, + StorageResponseHook, + StorageLoggingPolicy, + StorageHosts, + QueueMessagePolicy, + ExponentialRetry, +) +from .._version import VERSION +from .._generated.models import StorageErrorException +from .response_handlers import process_storage_error, PartialBatchErrorException + + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, + "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, + "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, + "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, +} + + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None
+        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+        self._hosts = kwargs.get("_hosts")
+        self.scheme = parsed_url.scheme
+
+        if service not in ["blob", "queue", "file-share", "dfs"]:
+            raise ValueError("Invalid service: {}".format(service))
+        service_name = service.split('-')[0]
+        account = parsed_url.netloc.split(".{}.core.".format(service_name))
+
+        self.account_name = account[0] if len(account) > 1 else None
+        if not self.account_name and (parsed_url.netloc.startswith("localhost")
+                                      or parsed_url.netloc.startswith("127.0.0.1")):
+            self.account_name = parsed_url.path.strip("/")
+
+        self.credential = _format_shared_key_credential(self.account_name, credential)
+        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+            raise ValueError("Token credential is only supported with HTTPS.")
+
+        secondary_hostname = None
+        if hasattr(self.credential, "account_name"):
+            self.account_name = self.credential.account_name
+            secondary_hostname = "{}-secondary.{}.{}".format(
+                self.credential.account_name, service_name, SERVICE_HOST_BASE)
+
+        if not self._hosts:
+            if len(account) > 1:
+                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+            if kwargs.get("secondary_hostname"):
+                secondary_hostname = kwargs["secondary_hostname"]
+            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+        self.require_encryption = kwargs.get("require_encryption", False)
+        self.key_encryption_key = kwargs.get("key_encryption_key")
+        self.key_resolver_function = kwargs.get("key_resolver_function")
+        self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
+
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        self._client.__exit__(*args)
+
+    def close(self):
+        """Closes the sockets opened by the client.
+        It is not necessary to call this when using the client as a context manager.
+        """
+        self._client.close()
+
+    @property
+    def url(self):
+        """The full endpoint URL to this entity, including SAS token if used.
+
+        This could be either the primary endpoint,
+        or the secondary endpoint depending on the current :func:`location_mode`.
+        """
+        return self._format_url(self._hosts[self._location_mode])
+
+    @property
+    def primary_endpoint(self):
+        """The full primary endpoint URL.
+
+        :type: str
+        """
+        return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+    @property
+    def primary_hostname(self):
+        """The hostname of the primary endpoint.
+
+        :type: str
+        """
+        return self._hosts[LocationMode.PRIMARY]
+
+    @property
+    def secondary_endpoint(self):
+        """The full secondary endpoint URL if configured.
+
+        If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :type: str
+        :raise ValueError:
+        """
+        if not self._hosts[LocationMode.SECONDARY]:
+            raise ValueError("No secondary host configured.")
+        return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+    @property
+    def secondary_hostname(self):
+        """The hostname of the secondary endpoint.
+
+        If not available this will be None. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :type: str or None
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :type: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError("No host URL for location mode: {}".format(value))
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :type: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
+        query_str = "?"
+        if snapshot:
+            query_str += "snapshot={}&".format(self.snapshot)
+        if share_snapshot:
+            query_str += "sharesnapshot={}&".format(self.snapshot)
+        if sas_token and not credential:
+            query_str += sas_token
+        elif is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")
+            credential = None
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, "get_token"):
+            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif credential is not None:
+            raise TypeError("Unsupported credential: {}".format(credential))
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        config.transport = kwargs.get("transport")  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            config.transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.headers_policy,
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, Pipeline(config.transport, policies=policies)
+
+    def _batch_send(
+        self, *reqs,  # type: HttpRequest
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
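+
+        :param HttpRequest reqs: The sub-requests to combine into a single batched request.
+        :return: An iterator over the responses to the individual sub-requests.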
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + request = self._client._client.post( # pylint: disable=protected-access + url='{}://{}/?comp=batch{}{}'.format( + self.scheme, + self.primary_hostname, + kwargs.pop('sas', ""), + kwargs.pop('timeout', "") + ), + headers={ + 'x-ms-version': self.api_version + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + pipeline_response = self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() + if raise_on_any_failure: + parts = list(response.parts()) + if any(p for p in parts if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts + ) + raise error + return iter(parts) + return parts + except StorageErrorException as error: + process_storage_error(error) + +class TransportWrapper(HttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. + """ + def __init__(self, transport): + self._transport = transport + + def send(self, request, **kwargs): + return self._transport.send(request, **kwargs) + + def open(self): + pass + + def close(self): + pass + + def __enter__(self): + pass + + def __exit__(self, *args): # pylint: disable=arguments-differ + pass + + +def _format_shared_key_credential(account_name, credential): + if isinstance(credential, six.string_types): + if not account_name: + raise ValueError("Unable to determine account name for shared key credential.") + credential = {"account_name": account_name, "account_key": credential} + if isinstance(credential, dict): + if "account_name" not in credential: + raise ValueError("Shared key credential missing 'account_name") + if "account_key" not in credential: + raise ValueError("Shared key credential missing 'account_key") + return SharedKeyCredentialPolicy(**credential) + return credential + + +def parse_connection_str(conn_str, credential, service): + conn_str = conn_str.rstrip(";") + conn_settings = [s.split("=", 1) for s in conn_str.split(";")] + if any(len(tup) != 2 for tup in conn_settings): + raise ValueError("Connection string is either blank or malformed.") + conn_settings = dict(conn_settings) + endpoints = _SERVICE_PARAMS[service] + primary = None + secondary = None + if not credential: + try: + credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} + except KeyError: + credential = conn_settings.get("SharedAccessSignature") + if endpoints["primary"] in conn_settings: + primary = conn_settings[endpoints["primary"]] + if endpoints["secondary"] in conn_settings: + secondary = conn_settings[endpoints["secondary"]] + else: + if endpoints["secondary"] in conn_settings: + raise ValueError("Connection string specifies only secondary endpoint.") + try: + primary = "{}://{}.{}.{}".format( + conn_settings["DefaultEndpointsProtocol"], + conn_settings["AccountName"], + service, + conn_settings["EndpointSuffix"], + ) + secondary = "{}-secondary.{}.{}".format( + conn_settings["AccountName"], service, 
conn_settings["EndpointSuffix"] + ) + except KeyError: + pass + + if not primary: + try: + primary = "https://{}.{}.{}".format( + conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) + ) + except KeyError: + raise ValueError("Connection string missing required connection details.") + return primary, secondary, credential + + +def create_configuration(**kwargs): + # type: (**Any) -> Configuration + config = Configuration(**kwargs) + config.headers_policy = StorageHeadersPolicy(**kwargs) + config.user_agent_policy = UserAgentPolicy( + sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) + config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.logging_policy = StorageLoggingPolicy(**kwargs) + config.proxy_policy = ProxyPolicy(**kwargs) + + # Storage settings + config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) + config.copy_polling_interval = 15 + + # Block blob uploads + config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) + config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) + config.use_byte_buffer = kwargs.get("use_byte_buffer", False) + + # Page blob uploads + config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, six.string_types): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client_async.py new file mode 100644 index 0000000..1fec883 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client_async.py @@ -0,0 +1,185 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+import logging
+from azure.core.pipeline import AsyncPipeline
+from azure.core.async_paging import AsyncList
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline.policies import (
+    ContentDecodePolicy,
+    AsyncBearerTokenCredentialPolicy,
+    AsyncRedirectPolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+)
+from azure.core.pipeline.transport import AsyncHttpTransport
+
+from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT
+from .authentication import SharedKeyCredentialPolicy
+from .base_client import create_configuration
+from .policies import (
+    StorageContentValidation,
+    StorageRequestHook,
+    StorageHosts,
+    StorageHeadersPolicy,
+    QueueMessagePolicy
+)
+from .policies_async import AsyncStorageResponseHook
+
+from .._generated.models import StorageErrorException
+from .response_handlers import process_storage_error, PartialBatchErrorException
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import Pipeline
+    from azure.core.pipeline.transport import HttpRequest
+    from azure.core.configuration import Configuration
+_LOGGER = logging.getLogger(__name__)
+
+
+class AsyncStorageAccountHostsMixin(object):
+
+    def __enter__(self):
+        raise TypeError("Async client only supports 'async with'.")
+
+    def __exit__(self, *args):
+        pass
+
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *args):
+        await self._client.__aexit__(*args)
+
+    async def close(self):
+        """Closes the sockets opened by the client.
+        It is not necessary to call this when using the client as a context manager.
+        """
+        await self._client.close()
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, 'get_token'):
+            self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif credential is not None:
+            raise TypeError("Unsupported credential: {}".format(credential))
+        config = kwargs.get('_configuration') or create_configuration(**kwargs)
+        if kwargs.get('_pipeline'):
+            return config, kwargs['_pipeline']
+        config.transport = kwargs.get('transport')  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            try:
+                from azure.core.pipeline.transport import AioHttpTransport
+            except ImportError:
+                raise ImportError("Unable to create async transport. Please check aiohttp is installed.")
+            config.transport = AioHttpTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.headers_policy,
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            ContentDecodePolicy(response_encoding="utf-8"),
+            AsyncRedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),  # type: ignore
+            config.retry_policy,
+            config.logging_policy,
+            AsyncStorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs),
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, AsyncPipeline(config.transport, policies=policies)
+
+    async def _batch_send(
+        self, *reqs: 'HttpRequest',
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url='{}://{}/?comp=batch{}{}'.format(
+                self.scheme,
+                self.primary_hostname,
+                kwargs.pop('sas', ""),
+                kwargs.pop('timeout', "")
+            ),
+            headers={
+                'x-ms-version': self.api_version
+            }
+        )
+
+        policies = [StorageHeadersPolicy()]
+        if self._credential_policy:
+            policies.append(self._credential_policy)
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=policies,
+            enforce_https=False
+        )
+
+        pipeline_response = await self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()  # Return an AsyncIterator
+            if raise_on_any_failure:
+                parts_list = []
+                async for part in parts:
+                    parts_list.append(part)
+                if any(p for p in parts_list if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts_list
+                    )
+                    raise error
+                return AsyncList(parts_list)
+            return parts
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, async_transport):
+        self._transport = async_transport
+
+    async def send(self, request, **kwargs):
+        return await self._transport.send(request, **kwargs)
+
+    async def open(self):
+        pass
+
+    async def close(self):
+        pass
+
+    async def __aenter__(self):
+        pass
+
+    async def __aexit__(self, *args):  # pylint: disable=arguments-differ
+        pass
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/constants.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/constants.py
new file mode 100644
index 0000000..f67ea29
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/constants.py
@@ -0,0 +1,27 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- + +import sys +from .._generated.version import VERSION + + +X_MS_VERSION = VERSION + +# Socket timeout in seconds +CONNECTION_TIMEOUT = 20 +READ_TIMEOUT = 20 + +# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) +# The socket timeout is now the maximum total duration to send all data. +if sys.version_info >= (3, 5): + # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds + # the 80000 seconds was calculated with: + # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) + READ_TIMEOUT = 80000 + +STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" + +SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/encryption.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/encryption.py new file mode 100644 index 0000000..62607cc --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/encryption.py @@ -0,0 +1,542 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os +from os import urandom +from json import ( + dumps, + loads, +) +from collections import OrderedDict + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import CBC +from cryptography.hazmat.primitives.padding import PKCS7 + +from azure.core.exceptions import HttpResponseError + +from .._version import VERSION +from . import encode_base64, decode_base64_to_bytes + + +_ENCRYPTION_PROTOCOL_V1 = '1.0' +_ERROR_OBJECT_INVALID = \ + '{0} does not define a complete interface. Value of {1} is either missing or invalid.' + + +def _validate_not_none(param_name, param): + if param is None: + raise ValueError('{0} should not be None.'.format(param_name)) + + +def _validate_key_encryption_key_wrap(kek): + # Note that None is not callable and so will fail the second clause of each check. + if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) + + +class _EncryptionAlgorithm(object): + ''' + Specifies which client encryption algorithm is used. + ''' + AES_CBC_256 = 'AES_CBC_256' + + +class _WrappedContentKey: + ''' + Represents the envelope key details stored on the service. + ''' + + def __init__(self, algorithm, encrypted_key, key_id): + ''' + :param str algorithm: + The algorithm used for wrapping. + :param bytes encrypted_key: + The encrypted content-encryption-key. + :param str key_id: + The key-encryption-key identifier string. 
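+
+        All three values are required; the constructor rejects None for any of them.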
+ ''' + + _validate_not_none('algorithm', algorithm) + _validate_not_none('encrypted_key', encrypted_key) + _validate_not_none('key_id', key_id) + + self.algorithm = algorithm + self.encrypted_key = encrypted_key + self.key_id = key_id + + +class _EncryptionAgent: + ''' + Represents the encryption agent stored on the service. + It consists of the encryption protocol version and encryption algorithm used. + ''' + + def __init__(self, encryption_algorithm, protocol): + ''' + :param _EncryptionAlgorithm encryption_algorithm: + The algorithm used for encrypting the message contents. + :param str protocol: + The protocol version used for encryption. + ''' + + _validate_not_none('encryption_algorithm', encryption_algorithm) + _validate_not_none('protocol', protocol) + + self.encryption_algorithm = str(encryption_algorithm) + self.protocol = protocol + + +class _EncryptionData: + ''' + Represents the encryption data that is stored on the service. + ''' + + def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, + key_wrapping_metadata): + ''' + :param bytes content_encryption_IV: + The content encryption initialization vector. + :param _EncryptionAgent encryption_agent: + The encryption agent. + :param _WrappedContentKey wrapped_content_key: + An object that stores the wrapping algorithm, the key identifier, + and the encrypted key bytes. + :param dict key_wrapping_metadata: + A dict containing metadata related to the key wrapping. + ''' + + _validate_not_none('content_encryption_IV', content_encryption_IV) + _validate_not_none('encryption_agent', encryption_agent) + _validate_not_none('wrapped_content_key', wrapped_content_key) + + self.content_encryption_IV = content_encryption_IV + self.encryption_agent = encryption_agent + self.wrapped_content_key = wrapped_content_key + self.key_wrapping_metadata = key_wrapping_metadata + + +def _generate_encryption_data_dict(kek, cek, iv): + ''' + Generates and returns the encryption metadata as a dict. + + :param object kek: The key encryption key. See calling functions for more information. + :param bytes cek: The content encryption key. + :param bytes iv: The initialization vector. + :return: A dict containing all the encryption metadata. + :rtype: dict + ''' + # Encrypt the cek. + wrapped_cek = kek.wrap_key(cek) + + # Build the encryption_data dict. + # Use OrderedDict to comply with Java's ordering requirement. + wrapped_content_key = OrderedDict() + wrapped_content_key['KeyId'] = kek.get_kid() + wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) + wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() + + encryption_agent = OrderedDict() + encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 + encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 + + encryption_data_dict = OrderedDict() + encryption_data_dict['WrappedContentKey'] = wrapped_content_key + encryption_data_dict['EncryptionAgent'] = encryption_agent + encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) + encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} + + return encryption_data_dict + + +def _dict_to_encryption_data(encryption_data_dict): + ''' + Converts the specified dictionary to an EncryptionData object for + eventual use in decryption. + + :param dict encryption_data_dict: + The dictionary containing the encryption data. + :return: an _EncryptionData object built from the dictionary. 
+    :rtype: _EncryptionData
+    '''
+    try:
+        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
+            raise ValueError("Unsupported encryption version.")
+    except KeyError:
+        raise ValueError("Unsupported encryption version.")
+    wrapped_content_key = encryption_data_dict['WrappedContentKey']
+    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
+                                             decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
+                                             wrapped_content_key['KeyId'])
+
+    encryption_agent = encryption_data_dict['EncryptionAgent']
+    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
+                                        encryption_agent['Protocol'])
+
+    if 'KeyWrappingMetadata' in encryption_data_dict:
+        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
+    else:
+        key_wrapping_metadata = None
+
+    encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
+                                      encryption_agent,
+                                      wrapped_content_key,
+                                      key_wrapping_metadata)
+
+    return encryption_data
+
+
+def _generate_AES_CBC_cipher(cek, iv):
+    '''
+    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
+
+    :param bytes[] cek: The content encryption key for the cipher.
+    :param bytes[] iv: The initialization vector for the cipher.
+    :return: A cipher for encrypting in AES256 CBC.
+    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
+    '''
+
+    backend = default_backend()
+    algorithm = AES(cek)
+    mode = CBC(iv)
+    return Cipher(algorithm, mode, backend)
+
+
+def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
+    '''
+    Extracts and returns the content_encryption_key stored in the encryption_data object
+    and performs necessary validation on all parameters.
+    :param _EncryptionData encryption_data:
+        The encryption metadata of the retrieved value.
+    :param obj key_encryption_key:
+        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
+        instance variables for more details.
+    :param func key_resolver:
+        A function that, given a key_id, returns a key_encryption_key. Please refer
+        to high-level service object instance variables for more details.
+    :return: the content_encryption_key stored in the encryption_data object.
+    :rtype: bytes[]
+    '''
+
+    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
+    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
+
+    if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
+        raise ValueError('Encryption version is not supported.')
+
+    content_encryption_key = None
+
+    # If the resolver exists, give priority to the key it finds.
+    if key_resolver is not None:
+        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
+
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
+    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
+        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
+    # Will throw an exception if the specified algorithm is not supported.
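+    # (the exception comes from the user-provided KEK implementation, which
+    # decides which wrapping algorithms it supports)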
+    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+                                                           encryption_data.wrapped_content_key.algorithm)
+    _validate_not_none('content_encryption_key', content_encryption_key)
+
+    return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+    '''
+    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+    :param str message:
+        The ciphertext to be decrypted.
+    :param _EncryptionData encryption_data:
+        The metadata associated with this ciphertext.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted plaintext.
+    :rtype: str
+    '''
+    _validate_not_none('message', message)
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+    # decrypt data
+    decrypted_data = message
+    decryptor = cipher.decryptor()
+    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+    # unpad data
+    unpadder = PKCS7(128).unpadder()
+    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+    return decrypted_data
+
+
+def encrypt_blob(blob, key_encryption_key):
+    '''
+    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encryption metadata. This method should
+    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+    is done as a part of the upload_data_chunks method.
+
+    :param bytes blob:
+        The blob to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+    :rtype: (str, bytes)
+    '''
+
+    _validate_not_none('blob', blob)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = urandom(32)
+    initialization_vector = urandom(16)
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(blob) + padder.finalize()
+
+    # Encrypt the data.
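+    # Concatenating the update() and finalize() outputs yields the complete ciphertext.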
+    # Encrypt the data.
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+                                                     initialization_vector)
+    encryption_data['EncryptionMode'] = 'FullBlob'
+
+    return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(key_encryption_key):
+    '''
+    Generates the encryption_metadata for the blob.
+
+    :param object key_encryption_key:
+        The key-encryption-key used to wrap the cek associated with this blob.
+    :return: A tuple containing the cek and iv for this blob as well as the
+        serialized encryption metadata for the blob.
+    :rtype: (bytes, bytes, str)
+    '''
+    encryption_data = None
+    content_encryption_key = None
+    initialization_vector = None
+    if key_encryption_key:
+        _validate_key_encryption_key_wrap(key_encryption_key)
+        content_encryption_key = urandom(32)
+        initialization_vector = urandom(16)
+        encryption_data = _generate_encryption_data_dict(key_encryption_key,
+                                                         content_encryption_key,
+                                                         initialization_vector)
+        encryption_data['EncryptionMode'] = 'FullBlob'
+        encryption_data = dumps(encryption_data)
+
+    return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+                 content, start_offset, end_offset, response_headers):
+    '''
+    Decrypts the given blob contents and returns only the requested range.
+
+    :param bool require_encryption:
+        Whether or not the calling blob service requires objects to be decrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :param function key_resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :param bytes content:
+        The encrypted blob content, possibly prefixed with the IV of the requested range.
+    :param int start_offset:
+        The number of bytes to trim from the start of the decrypted content.
+    :param int end_offset:
+        The number of bytes to trim from the end of the decrypted content.
+    :param dict response_headers:
+        The response headers, which carry the encryption metadata and range information.
+    :return: The decrypted blob content.
+    :rtype: bytes
+    '''
+    try:
+        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+    except:  # pylint: disable=bare-except
+        if require_encryption:
+            raise ValueError(
+                'Encryption required, but received data does not contain appropriate metadata. '
+                'Data was either not encrypted or metadata has been lost.')
+
+        return content
+
+    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    blob_type = response_headers['x-ms-blob-type']
+
+    iv = None
+    unpad = False
+    if 'content-range' in response_headers:
+        content_range = response_headers['content-range']
+        # Format: 'bytes x-y/size'
+
+        # Ignore the word 'bytes'
+        content_range = content_range.split(' ')
+
+        content_range = content_range[1].split('-')
+        content_range = content_range[1].split('/')
+        end_range = int(content_range[0])
+        blob_size = int(content_range[1])
+
+        if start_offset >= 16:
+            iv = content[:16]
+            content = content[16:]
+            start_offset -= 16
+        else:
+            iv = encryption_data.content_encryption_IV
+
+        if end_range == blob_size - 1:
+            unpad = True
+    else:
+        unpad = True
+        iv = encryption_data.content_encryption_IV
+
+    if blob_type == 'PageBlob':
+        unpad = False
+
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+    decryptor = cipher.decryptor()
+
+    content = decryptor.update(content) + decryptor.finalize()
+    if unpad:
+        unpadder = PKCS7(128).unpadder()
+        content = unpadder.update(content) + unpadder.finalize()
+
+    return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+    encryptor = None
+    padder = None
+
+    if cek is not None and iv is not None:
+        cipher = _generate_AES_CBC_cipher(cek, iv)
+        encryptor = cipher.encryptor()
+        padder = PKCS7(128).padder() if should_pad else None
+
+    return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key):
+    '''
+    Encrypts the given plain text message using AES256 in CBC mode with PKCS7 (128 bit) padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+    :param object message:
+        The plain text message to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A json-formatted string containing the encrypted message and the encryption metadata.
+    :rtype: str
+    '''
+
+    _validate_not_none('message', message)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = os.urandom(32)
+    initialization_vector = os.urandom(16)
+
+    # Queue encoding functions all return unicode strings, and encryption should
+    # operate on binary strings.
+    message = message.encode('utf-8')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(message) + padder.finalize()
+
+    # Encrypt the data.
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
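+
+    # For reference, decrypt_queue_message below expects exactly the two
+    # top-level keys built here (values abbreviated):
+    #
+    #     {
+    #         "EncryptedMessageContents": "<base64 ciphertext>",
+    #         "EncryptionData": {"WrappedContentKey": {...}, "EncryptionAgent": {...}, ...}
+    #     }
+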
+    # Build the dictionary structure.
+    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+                                                                      content_encryption_key,
+                                                                      initialization_vector)}
+
+    return dumps(queue_message)
+
+
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+    '''
+    Returns the decrypted message contents from an EncryptedQueueMessage.
+    If no encryption metadata is present, will return the unaltered message.
+
+    :param str message:
+        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+    :param response:
+        The pipeline response, used for error reporting if decryption fails.
+    :param bool require_encryption:
+        If set, will enforce that the retrieved messages are encrypted and decrypt them.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The plain text message from the queue message.
+    :rtype: str
+    '''
+
+    try:
+        message = loads(message)
+
+        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+    except (KeyError, ValueError):
+        # The message was either not JSON formatted (and so was not encrypted),
+        # or the user stored a JSON formatted message themselves.
+        if require_encryption:
+            raise ValueError('Message was not encrypted.')
+
+        return message
+    try:
+        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+    except Exception as error:
+        raise HttpResponseError(
+            message="Decryption failed.",
+            response=response,
+            error=error)
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/models.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/models.py
new file mode 100644
index 0000000..c51356b
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/models.py
@@ -0,0 +1,466 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-instance-attributes +from enum import Enum + + +def get_enum_value(value): + if value is None or value in ["None", ""]: + return None + try: + return value.value + except AttributeError: + return value + + +class StorageErrorCode(str, Enum): + + # Generic storage values + account_already_exists = "AccountAlreadyExists" + account_being_created = "AccountBeingCreated" + account_is_disabled = "AccountIsDisabled" + authentication_failed = "AuthenticationFailed" + authorization_failure = "AuthorizationFailure" + no_authentication_information = "NoAuthenticationInformation" + condition_headers_not_supported = "ConditionHeadersNotSupported" + condition_not_met = "ConditionNotMet" + empty_metadata_key = "EmptyMetadataKey" + insufficient_account_permissions = "InsufficientAccountPermissions" + internal_error = "InternalError" + invalid_authentication_info = "InvalidAuthenticationInfo" + invalid_header_value = "InvalidHeaderValue" + invalid_http_verb = "InvalidHttpVerb" + invalid_input = "InvalidInput" + invalid_md5 = "InvalidMd5" + invalid_metadata = "InvalidMetadata" + invalid_query_parameter_value = "InvalidQueryParameterValue" + invalid_range = "InvalidRange" + invalid_resource_name = "InvalidResourceName" + invalid_uri = "InvalidUri" + invalid_xml_document = "InvalidXmlDocument" + invalid_xml_node_value = "InvalidXmlNodeValue" + md5_mismatch = "Md5Mismatch" + metadata_too_large = "MetadataTooLarge" + missing_content_length_header = "MissingContentLengthHeader" + missing_required_query_parameter = "MissingRequiredQueryParameter" + missing_required_header = "MissingRequiredHeader" + missing_required_xml_node = "MissingRequiredXmlNode" + multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" + operation_timed_out = "OperationTimedOut" + out_of_range_input = "OutOfRangeInput" + out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" + request_body_too_large = "RequestBodyTooLarge" + resource_type_mismatch = "ResourceTypeMismatch" + request_url_failed_to_parse = "RequestUrlFailedToParse" + resource_already_exists = "ResourceAlreadyExists" + resource_not_found = "ResourceNotFound" + server_busy = "ServerBusy" + unsupported_header = "UnsupportedHeader" + unsupported_xml_node = "UnsupportedXmlNode" + unsupported_query_parameter = "UnsupportedQueryParameter" + unsupported_http_verb = "UnsupportedHttpVerb" + + # Blob values + append_position_condition_not_met = "AppendPositionConditionNotMet" + blob_already_exists = "BlobAlreadyExists" + blob_not_found = "BlobNotFound" + blob_overwritten = "BlobOverwritten" + blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" + block_count_exceeds_limit = "BlockCountExceedsLimit" + block_list_too_long = "BlockListTooLong" + cannot_change_to_lower_tier = "CannotChangeToLowerTier" + cannot_verify_copy_source = "CannotVerifyCopySource" + container_already_exists = "ContainerAlreadyExists" + container_being_deleted = "ContainerBeingDeleted" + container_disabled = "ContainerDisabled" + container_not_found = "ContainerNotFound" + content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" + copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" + copy_id_mismatch = "CopyIdMismatch" + feature_version_mismatch = "FeatureVersionMismatch" + incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" + incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" + incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" + infinite_lease_duration_required = "InfiniteLeaseDurationRequired" + invalid_blob_or_block = "InvalidBlobOrBlock" + invalid_blob_tier = "InvalidBlobTier" + invalid_blob_type = "InvalidBlobType" + invalid_block_id = "InvalidBlockId" + invalid_block_list = "InvalidBlockList" + invalid_operation = "InvalidOperation" + invalid_page_range = "InvalidPageRange" + invalid_source_blob_type = "InvalidSourceBlobType" + invalid_source_blob_url = "InvalidSourceBlobUrl" + invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" + lease_already_present = "LeaseAlreadyPresent" + lease_already_broken = "LeaseAlreadyBroken" + lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" + lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" + lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" + lease_id_missing = "LeaseIdMissing" + lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" + lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" + lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" + lease_lost = "LeaseLost" + lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" + lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" + lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" + max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" + no_pending_copy_operation = "NoPendingCopyOperation" + operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" + pending_copy_operation = "PendingCopyOperation" + previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" + previous_snapshot_not_found = "PreviousSnapshotNotFound" + previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" + sequence_number_condition_not_met = "SequenceNumberConditionNotMet" + sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" + snapshot_count_exceeded = "SnapshotCountExceeded" + snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" + snapshots_present = "SnapshotsPresent" + source_condition_not_met = "SourceConditionNotMet" + system_in_use = "SystemInUse" + target_condition_not_met = "TargetConditionNotMet" + unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" + blob_being_rehydrated = "BlobBeingRehydrated" + blob_archived = "BlobArchived" + blob_not_archived = "BlobNotArchived" + + # Queue values + invalid_marker = "InvalidMarker" + message_not_found = "MessageNotFound" + message_too_large = "MessageTooLarge" + pop_receipt_mismatch = "PopReceiptMismatch" + queue_already_exists = "QueueAlreadyExists" + queue_being_deleted = "QueueBeingDeleted" + queue_disabled = "QueueDisabled" + queue_not_empty = "QueueNotEmpty" + queue_not_found = "QueueNotFound" + + # File values + cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" + client_cache_flush_delay = "ClientCacheFlushDelay" + delete_pending = "DeletePending" + directory_not_empty = "DirectoryNotEmpty" + file_lock_conflict = "FileLockConflict" + invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" + parent_not_found = "ParentNotFound" + read_only_attribute = "ReadOnlyAttribute" + share_already_exists = "ShareAlreadyExists" + share_being_deleted = "ShareBeingDeleted" + 
share_disabled = "ShareDisabled" + share_not_found = "ShareNotFound" + sharing_violation = "SharingViolation" + share_snapshot_in_progress = "ShareSnapshotInProgress" + share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" + share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" + share_has_snapshots = "ShareHasSnapshots" + container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + content_length_must_be_zero = 'ContentLengthMustBeZero' + path_already_exists = 'PathAlreadyExists' + invalid_flush_position = 'InvalidFlushPosition' + invalid_property_name = 'InvalidPropertyName' + invalid_source_uri = 'InvalidSourceUri' + unsupported_rest_version = 'UnsupportedRestVersion' + file_system_not_found = 'FilesystemNotFound' + path_not_found = 'PathNotFound' + rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' + source_path_not_found = 'SourcePathNotFound' + destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' + file_system_already_exists = 'FilesystemAlreadyExists' + file_system_being_deleted = 'FilesystemBeingDeleted' + invalid_destination_path = 'InvalidDestinationPath' + invalid_rename_source_path = 'InvalidRenameSourcePath' + invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' + lease_is_already_broken = 'LeaseIsAlreadyBroken' + lease_name_mismatch = 'LeaseNameMismatch' + path_conflict = 'PathConflict' + source_path_is_being_deleted = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self): + return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) + + def has_key(self, k): + return k in self.__dict__ + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def keys(self): + return [k for k in self.__dict__ if not k.startswith('_')] + + def values(self): + return [v for k, v in self.__dict__.items() if not k.startswith('_')] + + def items(self): + return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] + + def get(self, key, default=None): + if key in self.__dict__: + return self.__dict__[key] + return default + + +class LocationMode(object): + """ + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + """ + + PRIMARY = 'primary' #: Requests should be sent to the primary location. + SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. + + +class ResourceTypes(object): + """ + Specifies the resource types that are accessible with the account SAS. 
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g., Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                     ('c' if self.container else '') +
+                     ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and container,
+        you would provide a string "sc".
+
+        :param str string: Specify service, container, or object in
+            the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.blob.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+class AccountSasPermissions(object):
+    """
+    :class:`~AccountSasPermissions` is used with the generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    perms found here.
+
+    :param bool read:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable set or get tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable get blobs by tags, this should be used together with list permission.
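+
+    Example (illustrative)::
+
+        p = AccountSasPermissions(read=True, list=True)
+        str(p)       # 'rl'
+        AccountSasPermissions.from_string('rwdl')  # read, write, delete and list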
+ """ + def __init__(self, read=False, write=False, delete=False, + list=False, # pylint: disable=redefined-builtin + add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): + self.read = read + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.list = list + self.add = add + self.create = create + self.update = update + self.process = process + self.tag = kwargs.pop('tag', False) + self.filter_by_tags = kwargs.pop('filter_by_tags', False) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('l' if self.list else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('u' if self.update else '') + + ('p' if self.process else '') + + ('f' if self.filter_by_tags else '') + + ('t' if self.tag else '') + ) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create AccountSasPermissions from a string. + + To specify read, write, delete, etc. permissions you need only to + include the first letter of the word in the string. E.g. for read and write + permissions you would provide a string "rw". + + :param str permission: Specify permissions in + the string with the first letter of the word. + :return: An AccountSasPermissions object + :rtype: ~azure.storage.blob.AccountSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_list = 'l' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_update = 'u' in permission + p_process = 'p' in permission + p_tag = 't' in permission + p_filter_by_tags = 'f' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, + list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, + filter_by_tags=p_filter_by_tags) + + return parsed + +class Services(object): + """Specifies the services accessible with the account SAS. + + :param bool blob: + Access for the `~azure.storage.blob.BlobServiceClient` + :param bool queue: + Access for the `~azure.storage.queue.QueueServiceClient` + :param bool fileshare: + Access for the `~azure.storage.fileshare.ShareServiceClient` + """ + + def __init__(self, blob=False, queue=False, fileshare=False): + self.blob = blob + self.queue = queue + self.fileshare = fileshare + self._str = (('b' if self.blob else '') + + ('q' if self.queue else '') + + ('f' if self.fileshare else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, string): + """Create Services from a string. + + To specify blob, queue, or file you need only to + include the first letter of the word in the string. E.g. for blob and queue + you would provide a string "bq". + + :param str string: Specify blob, queue, or file in + in the string with the first letter of the word. + :return: A Services object + :rtype: ~azure.storage.blob.Services + """ + res_blob = 'b' in string + res_queue = 'q' in string + res_file = 'f' in string + + parsed = cls(res_blob, res_queue, res_file) + parsed._str = string # pylint: disable = protected-access + return parsed + + +class UserDelegationKey(object): + """ + Represents a user delegation key, provided to the user by Azure Storage + based on their Azure Active Directory access token. 
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
+    """
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/parser.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/parser.py
new file mode 100644
index 0000000..c6feba8
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+    def _str(value):
+        if isinstance(value, unicode):  # pylint: disable=undefined-variable
+            return value.encode('utf-8')
+
+        return str(value)
+else:
+    _str = str
+
+
+def _to_utc_datetime(value):
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies.py
new file mode 100644
index 0000000..c9bc798
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies.py
@@ -0,0 +1,610 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+    from urllib.parse import (
+        urlparse,
+        parse_qsl,
+        urlunparse,
+        urlencode,
+    )
+except ImportError:
+    from urllib import urlencode  # type: ignore
+    from urlparse import (  # type: ignore
+        urlparse,
+        parse_qsl,
+        urlunparse,
+    )
+
+from azure.core.pipeline.policies import (
+    HeadersPolicy,
+    SansIOHTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    HTTPPolicy,
+    RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+    _unicode_type = unicode  # type: ignore
+except NameError:
+    _unicode_type = str
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, _unicode_type):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+    """Are we out of retries?"""
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+    """Whether the returned status code should be retried, taking the current
+    location mode (primary/secondary) into account.
+    """
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes above 500 with the exception of 501 Not Implemented and
+        # 505 Version Not Supported indicate a server issue and should be retried.
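+        # In short: 501 Not Implemented and 505 HTTP Version Not Supported are
+        # never retried; every other 5xx is.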
+ if status in [501, 505]: + return False + return True + return False + + +def urljoin(base_url, stub_url): + parsed = urlparse(base_url) + parsed = parsed._replace(path=parsed.path + '/' + stub_url) + return parsed.geturl() + + +class QueueMessagePolicy(SansIOHTTPPolicy): + + def on_request(self, request): + message_id = request.context.options.pop('queue_message_id', None) + if message_id: + request.http_request.url = urljoin( + request.http_request.url, + message_id) + + +class StorageHeadersPolicy(HeadersPolicy): + request_id_header_name = 'x-ms-client-request-id' + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + super(StorageHeadersPolicy, self).on_request(request) + current_time = format_date_time(time()) + request.http_request.headers['x-ms-date'] = current_time + + custom_id = request.context.options.pop('client_request_id', None) + request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) + + # def on_response(self, request, response): + # # raise exception if the echoed client request id from the service is not identical to the one we sent + # if self.request_id_header_name in response.http_response.headers: + + # client_request_id = request.http_request.headers.get(self.request_id_header_name) + + # if response.http_response.headers[self.request_id_header_name] != client_request_id: + # raise AzureError( + # "Echoed client request ID: {} does not match sent client request ID: {}. " + # "Service request ID: {}".format( + # response.http_response.headers[self.request_id_header_name], client_request_id, + # response.http_response.headers['x-ms-request-id']), + # response=response.http_response + # ) + + +class StorageHosts(SansIOHTTPPolicy): + + def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument + self.hosts = hosts + super(StorageHosts, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + request.context.options['hosts'] = self.hosts + parsed_url = urlparse(request.http_request.url) + + # Detect what location mode we're currently requesting with + location_mode = LocationMode.PRIMARY + for key, value in self.hosts.items(): + if parsed_url.netloc == value: + location_mode = key + + # See if a specific location mode has been specified, and if so, redirect + use_location = request.context.options.pop('use_location', None) + if use_location: + # Lock retries to the specific location + request.context.options['retry_to_secondary'] = False + if use_location not in self.hosts: + raise ValueError("Attempting to use undefined host location {}".format(use_location)) + if use_location != location_mode: + # Update request URL to use the specified location + updated = parsed_url._replace(netloc=self.hosts[use_location]) + request.http_request.url = updated.geturl() + location_mode = use_location + + request.context.options['location_mode'] = location_mode + + +class StorageLoggingPolicy(NetworkTraceLoggingPolicy): + """A policy that logs HTTP request and response to the DEBUG logger. 
+
+    This accepts both global configuration, and per-request level with "enable_http_logger"
+    """
+
+    def on_request(self, request):
+        # type: (PipelineRequest, Any) -> None
+        http_request = request.http_request
+        options = request.context.options
+        if options.pop("logging_enable", self.enable_http_logger):
+            request.context["logging_enable"] = True
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                log_url = http_request.url
+                query_params = http_request.query
+                if 'sig' in query_params:
+                    # Replace only the signature value, so the logged URL reads 'sig=*****'.
+                    log_url = log_url.replace(query_params['sig'], "*****")
+                _LOGGER.debug("Request URL: %r", log_url)
+                _LOGGER.debug("Request method: %r", http_request.method)
+                _LOGGER.debug("Request headers:")
+                for header, value in http_request.headers.items():
+                    if header.lower() == 'authorization':
+                        value = '*****'
+                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+                        # take the url apart and scrub away the signed signature
+                        scheme, netloc, path, params, query, fragment = urlparse(value)
+                        parsed_qs = dict(parse_qsl(query))
+                        parsed_qs['sig'] = '*****'
+
+                        # the SAS needs to be put back together
+                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+                    _LOGGER.debug("    %r: %r", header, value)
+                _LOGGER.debug("Request body:")
+
+                # We don't want to log the binary data of a file upload.
+                if isinstance(http_request.body, types.GeneratorType):
+                    _LOGGER.debug("File upload")
+                else:
+                    _LOGGER.debug(str(http_request.body))
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log request: %r", err)
+
+    def on_response(self, request, response):
+        # type: (PipelineRequest, PipelineResponse, Any) -> None
+        if response.context.pop("logging_enable", self.enable_http_logger):
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                _LOGGER.debug("Response status: %r", response.http_response.status_code)
+                _LOGGER.debug("Response headers:")
+                for res_header, value in response.http_response.headers.items():
+                    _LOGGER.debug("    %r: %r", res_header, value)
+
+                # We don't want to log binary data if the response is a file.
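+                # The content-disposition and content-type sniffing below decides
+                # whether to log a filename, a binary-data marker, or the body itself.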
+ _LOGGER.debug("Response content:") + pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) + header = response.http_response.headers.get('content-disposition') + + if header and pattern.match(header): + filename = header.partition('=')[2] + _LOGGER.debug("File attachments: %s", filename) + elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): + _LOGGER.debug("Body contains binary data.") + elif response.http_response.headers.get("content-type", "").startswith("image"): + _LOGGER.debug("Body contains image data.") + else: + if response.context.options.get('stream', False): + _LOGGER.debug("Body is streamable") + else: + _LOGGER.debug(response.http_response.text()) + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log response: %s", repr(err)) + + +class StorageRequestHook(SansIOHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._request_callback = kwargs.get('raw_request_hook') + super(StorageRequestHook, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, **Any) -> PipelineResponse + request_callback = request.context.options.pop('raw_request_hook', self._request_callback) + if request_callback: + request_callback(request) + + +class StorageResponseHook(HTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(StorageResponseHook, self).__init__() + + def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = self.next.send(request) + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + response_callback(response) + request.context['response_callback'] = response_callback + return response + + +class StorageContentValidation(SansIOHTTPPolicy): + """A simple policy that sends the given headers + with the request. + + This will overwrite any headers already defined in the request. 
+ """ + header_name = 'Content-MD5' + + def __init__(self, **kwargs): # pylint: disable=unused-argument + super(StorageContentValidation, self).__init__() + + @staticmethod + def get_content_md5(data): + md5 = hashlib.md5() # nosec + if isinstance(data, bytes): + md5.update(data) + elif hasattr(data, 'read'): + pos = 0 + try: + pos = data.tell() + except: # pylint: disable=bare-except + pass + for chunk in iter(lambda: data.read(4096), b""): + md5.update(chunk) + try: + data.seek(pos, SEEK_SET) + except (AttributeError, IOError): + raise ValueError("Data should be bytes or a seekable file-like object.") + else: + raise ValueError("Data should be bytes or a seekable file-like object.") + + return md5.digest() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + validate_content = request.context.options.pop('validate_content', False) + if validate_content and request.http_request.method != 'GET': + computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) + request.http_request.headers[self.header_name] = computed_md5 + request.context['validate_content_md5'] = computed_md5 + request.context['validate_content'] = validate_content + + def on_response(self, request, response): + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = request.context.get('validate_content_md5') or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + raise AzureError( + 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( + response.http_response.headers['content-md5'], computed_md5), + response=response.http_response + ) + + +class StorageRetryPolicy(HTTPPolicy): + """ + The base class for Exponential and Linear retries containing shared code. + """ + + def __init__(self, **kwargs): + self.total_retries = kwargs.pop('retry_total', 10) + self.connect_retries = kwargs.pop('retry_connect', 3) + self.read_retries = kwargs.pop('retry_read', 3) + self.status_retries = kwargs.pop('retry_status', 3) + self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) + super(StorageRetryPolicy, self).__init__() + + def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use + """ + A function which sets the next host location on the request, if applicable. + + :param ~azure.storage.models.RetryContext context: + The retry context containing the previous host location and the request + to evaluate and possibly modify. 
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None): + """Increment the retry counters. + + :param response: A pipeline response object. + :param error: An error encountered during the request, or + None if the response was received successfully. + + :return: Whether the retry attempts are exhausted. + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, retry will not work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if the body is not seekable, retry will not work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base ** retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
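+
+        Example (illustrative): with the defaults above, successive sleeps are
+        drawn from 15±3, 18±3 and 24±3 seconds before retries are exhausted.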
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies_async.py
new file mode 100644
index 0000000..e0926b8
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import asyncio
+import random
+import logging
+from typing import Any, TYPE_CHECKING
+
+from azure.core.pipeline.policies import AsyncHTTPPolicy
+from azure.core.exceptions import AzureError
+
+from .policies import is_retry, StorageRetryPolicy
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+async def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        # iscoroutinefunction detects async callables; plain iscoroutine would
+        # miss them and leave the hook's coroutine unawaited.
+        if asyncio.iscoroutinefunction(settings['hook']):
+            await settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+        else:
+            settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+
+
+class AsyncStorageResponseHook(AsyncHTTPPolicy):
+
+    def __init__(self, **kwargs):  # pylint: disable=unused-argument
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(AsyncStorageResponseHook, self).__init__()
+
+    async def send(self, request):
+        # type: (PipelineRequest) -> PipelineResponse
+        data_stream_total = request.context.get('data_stream_total') or \
+            request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current') or \
+            request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current') or \
+            request.context.options.pop('upload_stream_current', None)
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = await self.next.send(request)
+        await response.http_response.load_body()
+
+        will_retry = is_retry(response, request.context.options.get('mode'))
+        if not will_retry and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif not will_retry and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            pipeline_obj.context['data_stream_total'] = data_stream_total
+            pipeline_obj.context['download_stream_current'] = download_stream_current
+            pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            if asyncio.iscoroutinefunction(response_callback):
+                await response_callback(response)
+            else:
+                response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class AsyncStorageRetryPolicy(StorageRetryPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
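+
+    Subclasses supply ``get_backoff_time``; ``send`` drives the retry loop and
+    awaits ``sleep`` between attempts.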
+ """ + + async def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + await transport.sleep(backoff) + + async def send(self, request): + retries_remaining = True + response = None + retry_settings = self.configure_retries(request) + while retries_remaining: + try: + response = await self.next.send(request) + if is_retry(response, retry_settings['mode']): + retries_remaining = self.increment( + retry_settings, + request=request.http_request, + response=response.http_response) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=response.http_response, + error=None) + await self.sleep(retry_settings, request.context.transport) + continue + break + except AzureError as err: + retries_remaining = self.increment( + retry_settings, request=request.http_request, error=err) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=None, + error=err) + await self.sleep(retry_settings, request.context.transport) + continue + raise err + if retry_settings['history']: + response.context['history'] = retry_settings['history'] + response.http_response.location_mode = retry_settings['mode'] + return response + + +class ExponentialRetry(AsyncStorageRetryPolicy): + """Exponential retry.""" + + def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, + retry_to_secondary=False, random_jitter_range=3, **kwargs): + ''' + Constructs an Exponential retry object. The initial_backoff is used for + the first retry. Subsequent retries are retried after initial_backoff + + increment_power^retry_count seconds. For example, by default the first retry + occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the + third after (15+3^2) = 24 seconds. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + ''' + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + super(ExponentialRetry, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings): + """ + Calculates how long to sleep before retrying. + + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. 
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/request_handlers.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/request_handlers.py
new file mode 100644
index 0000000..4f15b65
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/request_handlers.py
@@ -0,0 +1,147 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def serialize_iso(attr):
+    """Serialize a Datetime object into an ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if the format is invalid.
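+
+    Example (illustrative)::
+
+        serialize_iso(datetime.datetime(2020, 10, 12, 13, 43, 8))  # '2020-10-12T13:43:08Z'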
+ """ + if not attr: + return None + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, + utc.tm_hour, utc.tm_min, utc.tm_sec) + return date + 'Z' + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(ValueError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + +def get_length(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: # pylint: disable=bare-except + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + try: + return fstat(fileno).st_size + except OSError: + # Not a valid fileno, may be possible requests returned + # a socket number? + pass + + # If the stream is seekable and tell() is implemented, calculate the stream size. + try: + current_position = data.tell() + data.seek(0, SEEK_END) + length = data.tell() - current_position + data.seek(current_position, SEEK_SET) + except (AttributeError, UnsupportedOperation): + pass + + return length + + +def read_length(data): + try: + if hasattr(data, 'read'): + read_data = b'' + for chunk in iter(lambda: data.read(4096), b""): + read_data += chunk + return len(read_data), read_data + if hasattr(data, '__iter__'): + read_data = b'' + for chunk in data: + read_data += chunk + return len(read_data), read_data + except: # pylint: disable=bare-except + pass + raise ValueError("Unable to calculate content length, please specify.") + + +def validate_and_format_range_headers( + start_range, end_range, start_range_required=True, + end_range_required=True, check_content_md5=False, align_to_page=False): + # If end range is provided, start range must be provided + if (start_range_required or end_range is not None) and start_range is None: + raise ValueError("start_range value cannot be None.") + if end_range_required and end_range is None: + raise ValueError("end_range value cannot be None.") + + # Page ranges must be 512 aligned + if align_to_page: + if start_range is not None and start_range % 512 != 0: + raise ValueError("Invalid page blob start_range: {0}. " + "The size must be aligned to a 512-byte boundary.".format(start_range)) + if end_range is not None and end_range % 512 != 511: + raise ValueError("Invalid page blob end_range: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(end_range)) + + # Format based on whether end_range is present + range_header = None + if end_range is not None: + range_header = 'bytes={0}-{1}'.format(start_range, end_range) + elif start_range is not None: + range_header = "bytes={0}-".format(start_range) + + # Content MD5 can only be provided for a complete range less than 4MB in size + range_validation = None + if check_content_md5: + if start_range is None or end_range is None: + raise ValueError("Both start and end range requied for MD5 content validation.") + if end_range - start_range > 4 * 1024 * 1024: + raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") + range_validation = 'true' + + return range_header, range_validation + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> Dict[str, str] + headers = {} + if metadata: + for key, value in metadata.items(): + headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value + return headers diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/response_handlers.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/response_handlers.py new file mode 100644 index 0000000..ac526e5 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/response_handlers.py @@ -0,0 +1,159 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging + +from azure.core.pipeline.policies import ContentDecodePolicy +from azure.core.exceptions import ( + HttpResponseError, + ResourceNotFoundError, + ResourceModifiedError, + ResourceExistsError, + ClientAuthenticationError, + DecodeError) + +from .parser import _to_utc_datetime +from .models import StorageErrorCode, UserDelegationKey, get_enum_value + + +if TYPE_CHECKING: + from datetime import datetime + from azure.core.exceptions import AzureError + + +_LOGGER = logging.getLogger(__name__) + + +class PartialBatchErrorException(HttpResponseError): + """There is a partial failure in batch operations. + + :param str message: The message of the exception. + :param response: Server response to be deserialized. + :param list parts: A list of the parts in multipart response. 
+ """ + + def __init__(self, message, response, parts): + self.parts = parts + super(PartialBatchErrorException, self).__init__(message=message, response=response) + + +def parse_length_from_content_range(content_range): + ''' + Parses the blob length from the content range header: bytes 1-3/65537 + ''' + if content_range is None: + return None + + # First, split in space and take the second half: '1-3/65537' + # Next, split on slash and take the second half: '65537' + # Finally, convert to an int: 65537 + return int(content_range.split(' ', 1)[1].split('/', 1)[1]) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = get_enum_value(value) + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + +def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers), deserialized + + +def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return response.location_mode, deserialized + + +def process_storage_error(storage_error): + raise_error = HttpResponseError + error_code = storage_error.response.headers.get('x-ms-error-code') + error_message = storage_error.message + additional_data = {} + try: + error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) + if error_body: + for info in error_body.iter(): + if info.tag.lower() == 'code': + error_code = info.text + elif info.tag.lower() == 'message': + error_message = info.text + else: + additional_data[info.tag] = info.text + except DecodeError: + pass + + try: + if error_code: + error_code = StorageErrorCode(error_code) + if error_code in [StorageErrorCode.condition_not_met, + StorageErrorCode.blob_overwritten]: + raise_error = ResourceModifiedError + if error_code in [StorageErrorCode.invalid_authentication_info, + StorageErrorCode.authentication_failed]: + raise_error = ClientAuthenticationError + if error_code in [StorageErrorCode.resource_not_found, + StorageErrorCode.cannot_verify_copy_source, + StorageErrorCode.blob_not_found, + StorageErrorCode.queue_not_found, + StorageErrorCode.container_not_found, + StorageErrorCode.parent_not_found, + StorageErrorCode.share_not_found]: + raise_error = ResourceNotFoundError + if error_code in [StorageErrorCode.account_already_exists, + StorageErrorCode.account_being_created, + StorageErrorCode.resource_already_exists, + StorageErrorCode.resource_type_mismatch, + StorageErrorCode.blob_already_exists, + StorageErrorCode.queue_already_exists, + StorageErrorCode.container_already_exists, + StorageErrorCode.container_being_deleted, + StorageErrorCode.queue_being_deleted, + StorageErrorCode.share_already_exists, + StorageErrorCode.share_being_deleted]: + raise_error = ResourceExistsError + except ValueError: + # Got an unknown error code + pass + + try: + error_message += "\nErrorCode:{}".format(error_code.value) + except AttributeError: + error_message += "\nErrorCode:{}".format(error_code) + for name, info in additional_data.items(): + error_message 
+= "\n{}:{}".format(name, info) + + error = raise_error(message=error_message, response=storage_error.response) + error.error_code = error_code + error.additional_info = additional_data + raise error + + +def parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/shared_access_signature.py new file mode 100644 index 0000000..07aad5f --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/shared_access_signature.py @@ -0,0 +1,220 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . import sign_string, url_quote + + +class QueryStringConstants(object): + SIGNED_SIGNATURE = 'sig' + SIGNED_PERMISSION = 'sp' + SIGNED_START = 'st' + SIGNED_EXPIRY = 'se' + SIGNED_RESOURCE = 'sr' + SIGNED_IDENTIFIER = 'si' + SIGNED_IP = 'sip' + SIGNED_PROTOCOL = 'spr' + SIGNED_VERSION = 'sv' + SIGNED_CACHE_CONTROL = 'rscc' + SIGNED_CONTENT_DISPOSITION = 'rscd' + SIGNED_CONTENT_ENCODING = 'rsce' + SIGNED_CONTENT_LANGUAGE = 'rscl' + SIGNED_CONTENT_TYPE = 'rsct' + START_PK = 'spk' + START_RK = 'srk' + END_PK = 'epk' + END_RK = 'erk' + SIGNED_RESOURCE_TYPES = 'srt' + SIGNED_SERVICES = 'ss' + SIGNED_OID = 'skoid' + SIGNED_TID = 'sktid' + SIGNED_KEY_START = 'skt' + SIGNED_KEY_EXPIRY = 'ske' + SIGNED_KEY_SERVICE = 'sks' + SIGNED_KEY_VERSION = 'skv' + + # for ADLS + SIGNED_AUTHORIZED_OID = 'saoid' + SIGNED_UNAUTHORIZED_OID = 'suoid' + SIGNED_CORRELATION_ID = 'scid' + SIGNED_DIRECTORY_DEPTH = 'sdd' + + @staticmethod + def to_list(): + return [ + QueryStringConstants.SIGNED_SIGNATURE, + QueryStringConstants.SIGNED_PERMISSION, + QueryStringConstants.SIGNED_START, + QueryStringConstants.SIGNED_EXPIRY, + QueryStringConstants.SIGNED_RESOURCE, + QueryStringConstants.SIGNED_IDENTIFIER, + QueryStringConstants.SIGNED_IP, + QueryStringConstants.SIGNED_PROTOCOL, + QueryStringConstants.SIGNED_VERSION, + QueryStringConstants.SIGNED_CACHE_CONTROL, + QueryStringConstants.SIGNED_CONTENT_DISPOSITION, + QueryStringConstants.SIGNED_CONTENT_ENCODING, + QueryStringConstants.SIGNED_CONTENT_LANGUAGE, + QueryStringConstants.SIGNED_CONTENT_TYPE, + QueryStringConstants.START_PK, + QueryStringConstants.START_RK, + QueryStringConstants.END_PK, + QueryStringConstants.END_RK, + QueryStringConstants.SIGNED_RESOURCE_TYPES, + QueryStringConstants.SIGNED_SERVICES, + QueryStringConstants.SIGNED_OID, + 
QueryStringConstants.SIGNED_TID,
+            QueryStringConstants.SIGNED_KEY_START,
+            QueryStringConstants.SIGNED_KEY_EXPIRY,
+            QueryStringConstants.SIGNED_KEY_SERVICE,
+            QueryStringConstants.SIGNED_KEY_VERSION,
+            # for ADLS
+            QueryStringConstants.SIGNED_AUTHORIZED_OID,
+            QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+            QueryStringConstants.SIGNED_CORRELATION_ID,
+            QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(self, services, resource_types, permission, expiry, start=None,
+                         ip=None, protocol=None):
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param Services services:
+            Specifies the services accessible with the account SAS.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy. You can combine
+            values to provide more than one permission.
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. Azure will always convert values
+            to UTC. If a date is passed in without timezone info, it is assumed to
+            be UTC.
+        :type start: datetime or str
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made with the SAS. The default
+            value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
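+
+        Example (an illustrative sketch; the account name and key below are
+        placeholders):
+
+            >>> from datetime import datetime, timedelta
+            >>> sas = SharedAccessSignature('myaccount', '<account_key>')
+            >>> token = sas.generate_account(
+            ...     services='b', resource_types='sco', permission='rl',
+            ...     expiry=datetime.utcnow() + timedelta(hours=1))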
+ ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION)) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads.py new file mode 100644 index 0000000..abf3fb2 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads.py @@ -0,0 +1,550 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from concurrent import futures
+from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
+from threading import Lock
+from itertools import islice
+from math import ceil
+
+import six
+
+from azure.core.tracing.common import with_current_context
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .encryption import get_blob_encryptor_and_padder
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
+
+
+def _parallel_uploads(executor, uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(executor.submit(with_current_context(uploader), next_chunk))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    done, _running = futures.wait(running)
+    range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        validate_content=None,
+        encryption_options=None,
+        **kwargs):
+
+    if encryption_options:
+        encryptor, padder = get_blob_encryptor_and_padder(
+            encryption_options.get('cek'),
+            encryption_options.get('vector'),
+            uploader_class is not PageBlobChunkUploader)
+        kwargs['encryptor'] = encryptor
+        kwargs['padder'] = padder
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        validate_content=validate_content,
+        **kwargs)
+    if parallel:
+        executor = futures.ThreadPoolExecutor(max_concurrency)
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = [
+            executor.submit(with_current_context(uploader.process_chunk), u)
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        executor = futures.ThreadPoolExecutor(max_concurrency)
+        upload_tasks = uploader.get_substream_blocks()
+        running_futures = [
+            executor.submit(with_current_context(uploader.process_substream_block), u)
+            for u in islice(upload_tasks,
0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] + return sorted(range_ids) + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def _upload_substream_block(self, block_id, 
block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, block_id, block_stream): + range_id = self._upload_substream_block(block_id, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return index, block_id + + def _upload_substream_block(self, block_id, block_stream): + try: + self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + return not any(bytearray(chunk_data)) + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = self.service.upload_pages( + chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + self.current_length = int(self.response_headers["blob_append_offset"]) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + length = 
len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
+
+
+class SubStream(IOBase):
+
+    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+        # Python 2.7: file-like objects created with open() typically support seek(), but are not
+        # derivations of io.IOBase and thus do not implement seekable().
+        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
+        try:
+            # only the main thread runs this, so there's no need to grab the lock
+            wrapped_stream.seek(0, SEEK_CUR)
+        except:
+            raise ValueError("Wrapped stream must support seek().")
+
+        self._lock = lockObj
+        self._wrapped_stream = wrapped_stream
+        self._position = 0
+        self._stream_begin_index = stream_begin_index
+        self._length = length
+        self._buffer = BytesIO()
+
+        # we must avoid buffering more than necessary, and also not use up too much memory
+        # so the max buffer size is capped at 4MB
+        self._max_buffer_size = (
+            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+        )
+        self._current_buffer_start = 0
+        self._current_buffer_size = 0
+        super(SubStream, self).__init__()
+
+    def __len__(self):
+        return self._length
+
+    def close(self):
+        if self._buffer:
+            self._buffer.close()
+        self._wrapped_stream = None
+        IOBase.close(self)
+
+    def fileno(self):
+        return self._wrapped_stream.fileno()
+
+    def flush(self):
+        pass
+
+    def read(self, size=None):
+        if self.closed:  # pylint: disable=using-constant-test
+            raise ValueError("Stream is closed.")
+
+        if size is None:
+            size = self._length - self._position
+
+        # adjust if out of bounds
+        if size + self._position >= self._length:
+            size = self._length - self._position
+
+        # return fast
+        if size == 0 or self._buffer.closed:
+            return b""
+
+        # attempt first read from the read buffer and update position
+        read_buffer = self._buffer.read(size)
+        bytes_read = len(read_buffer)
+        bytes_remaining = size - bytes_read
+        self._position += bytes_read
+
+        # repopulate the read buffer from the underlying stream to fulfill the request
+        # ensure the seek and read operations are done atomically (only if a lock is provided)
+        if bytes_remaining > 0:
+            with self._buffer:
+                # either read in the max buffer size specified on the class
+                # or read in just enough data for the current block/sub stream
+                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+                # lock is only defined if max_concurrency > 1 (parallel uploads)
+                if self._lock:
+                    with self._lock:
+                        # reposition the underlying stream to match the start of the data to read
+                        absolute_position = self._stream_begin_index + self._position
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+                        # If we can't seek to the right location, our read will be corrupted so fail fast.
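+                        # (tell() is checked explicitly because some legacy file-like
+                        # objects return None from seek() instead of the new position.)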
+                        if self._wrapped_stream.tell() != absolute_position:
+                            raise IOError("Stream failed to seek to the desired location.")
+                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+                else:
+                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+            if buffer_from_stream:
+                # update the buffer with new data from the wrapped stream
+                # we need to note down the start position and size of the buffer, in case seek is performed later
+                self._buffer = BytesIO(buffer_from_stream)
+                self._current_buffer_start = self._position
+                self._current_buffer_size = len(buffer_from_stream)
+
+                # read the remaining bytes from the new buffer and update position
+                second_read_buffer = self._buffer.read(bytes_remaining)
+                read_buffer += second_read_buffer
+                self._position += len(second_read_buffer)
+
+        return read_buffer
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        raise UnsupportedOperation
+
+    def seek(self, offset, whence=0):
+        if whence is SEEK_SET:
+            start_index = 0
+        elif whence is SEEK_CUR:
+            start_index = self._position
+        elif whence is SEEK_END:
+            start_index = self._length
+            offset = -offset
+        else:
+            raise ValueError("Invalid argument for the 'whence' parameter.")
+
+        pos = start_index + offset
+
+        if pos > self._length:
+            pos = self._length
+        elif pos < 0:
+            pos = 0
+
+        # check if buffer is still valid
+        # if not, drop buffer
+        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+            self._buffer.close()
+            self._buffer = BytesIO()
+        else:  # if yes seek to correct position
+            delta = pos - self._current_buffer_start
+            self._buffer.seek(delta, SEEK_SET)
+
+        self._position = pos
+        return pos
+
+    def seekable(self):
+        return True
+
+    def tell(self):
+        return self._position
+
+    def write(self, *args):
+        raise UnsupportedOperation
+
+    def writelines(self, *args):
+        raise UnsupportedOperation
+
+    def writable(self):
+        return False
+
+
+class IterStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator, encoding="UTF-8"):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self.leftover = b""
+        self.encoding = encoding
+
+    def __len__(self):
+        return self.generator.__len__()
+
+    def __iter__(self):
+        return self.iterator
+
+    def seekable(self):
+        return False
+
+    def __next__(self):
+        return next(self.iterator)
+
+    next = __next__  # Python 2 compatibility.
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is unseekable.")
+
+    def read(self, size):
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = self.__next__()
+                if isinstance(chunk, six.text_type):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        except StopIteration:
+            pass
+
+        if count > size:
+            self.leftover = data[size:]
+
+        return data[:size]
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads_async.py
new file mode 100644
index 0000000..fe68a2b
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads_async.py
@@ -0,0 +1,350 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+import asyncio
+from asyncio import Lock
+from itertools import islice
+import threading
+
+from math import ceil
+
+import six
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .encryption import get_blob_encryptor_and_padder
+from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
+
+
+async def _parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        encryption_options=None,
+        **kwargs):
+
+    if encryption_options:
+        encryptor, padder = get_blob_encryptor_and_padder(
+            encryption_options.get('cek'),
+            encryption_options.get('vector'),
+            uploader_class is not PageBlobChunkUploader)
+        kwargs['encryptor'] = encryptor
+        kwargs['padder'] = padder
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_chunk(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for chunk in uploader.get_chunk_streams():
+            range_ids.append(await uploader.process_chunk(chunk))
+
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+async def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_substream_blocks()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_substream_block(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for block in uploader.get_substream_blocks():
+            range_ids.append(await
uploader.process_substream_block(block)) + return sorted(range_ids) + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, block_id, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def 
_upload_substream_block_with_progress(self, block_id, block_stream): + range_id = await self._upload_substream_block(block_id, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, block_id, block_stream): + try: + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = await self.service.upload_pages( + chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + 
chunk_end = chunk_offset + len(chunk_data) - 1 + response = await self.service.upload_range( + chunk_data, + chunk_offset, + chunk_end, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + return range_id, response diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared_access_signature.py new file mode 100644 index 0000000..370fe4e --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_shared_access_signature.py @@ -0,0 +1,596 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, TYPE_CHECKING +) + +from ._shared import sign_string, url_quote +from ._shared.constants import X_MS_VERSION +from ._shared.models import Services +from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ + QueryStringConstants + +if TYPE_CHECKING: + from datetime import datetime + from .import ( + ResourceTypes, + AccountSasPermissions, + UserDelegationKey, + ContainerSasPermissions, + BlobSasPermissions + ) + + +class BlobQueryStringConstants(object): + SIGNED_TIMESTAMP = 'snapshot' + + +class BlobSharedAccessSignature(SharedAccessSignature): + ''' + Provides a factory for creating blob and container access + signature tokens with a common account name and account key. Users can either + use the factory or can construct the appropriate service and use the + generate_*_shared_access_signature method directly. + ''' + + def __init__(self, account_name, account_key=None, user_delegation_key=None): + ''' + :param str account_name: + The storage account name used to generate the shared access signatures. + :param str account_key: + The access key to generate the shares access signatures. + :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: + Instead of an account key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling get_user_delegation_key on any Blob service object. + ''' + super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) + self.user_delegation_key = user_delegation_key + + def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None, + expiry=None, start=None, policy_id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None, **kwargs): + ''' + Generates a shared access signature for the blob or one of its snapshots. + Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. + :param str blob_name: + Name of blob. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to grant permission. + :param BlobSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. 
+ Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_blob_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + resource_path = container_name + '/' + blob_name + + sas = _BlobSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + + resource = 'bs' if snapshot else 'b' + resource = 'bv' if version_id else resource + resource = 'd' if kwargs.pop("is_directory", None) else resource + sas.add_resource(resource) + + sas.add_timestamp(snapshot or version_id) + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_info_for_hns_account(**kwargs) + sas.add_resource_signature(self.account_name, self.account_key, resource_path, + user_delegation_key=self.user_delegation_key) + + return sas.get_token() + + def generate_container(self, container_name, permission=None, expiry=None, + start=None, policy_id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None, **kwargs): + ''' + Generates a shared access signature for the container. 
+ Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. + :param ContainerSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_blob_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. 
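+
+        Example (an illustrative sketch; the account values below are
+        placeholders):
+
+            >>> from datetime import datetime, timedelta
+            >>> sas = BlobSharedAccessSignature('myaccount', account_key='<account_key>')
+            >>> token = sas.generate_container(
+            ...     'mycontainer', permission='rl',
+            ...     expiry=datetime.utcnow() + timedelta(hours=1))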
+ ''' + sas = _BlobSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + sas.add_resource('c') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_info_for_hns_account(**kwargs) + sas.add_resource_signature(self.account_name, self.account_key, container_name, + user_delegation_key=self.user_delegation_key) + return sas.get_token() + + +class _BlobSharedAccessHelper(_SharedAccessHelper): + + def add_timestamp(self, timestamp): + self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) + + def add_info_for_hns_account(self, **kwargs): + self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None)) + self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None)) + self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None)) + self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None)) + + def get_value_to_append(self, query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): + # pylint: disable = no-member + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/blob/' + account_name + path + '\n' + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. + string_to_sign = \ + (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_START) + + self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + canonicalized_resource) + + if user_delegation_key is not None: + self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) + self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) + self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) + self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) + self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) + self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) + + string_to_sign += \ + (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_TID) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID)) + else: + string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + + string_to_sign += \ + (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + + self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + + self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + + 
self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) + + # remove the trailing newline + if string_to_sign[-1] == '\n': + string_to_sign = string_to_sign[:-1] + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key if user_delegation_key is None else user_delegation_key.value, + string_to_sign)) + + def get_token(self): + # a conscious decision was made to exclude the timestamp in the generated token + # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp + exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] + return '&'.join(['{0}={1}'.format(n, url_quote(v)) + for n, v in self.query_dict.items() if v is not None and n not in exclude]) + + +def generate_account_sas( + account_name, # type: str + account_key, # type: str + resource_types, # type: Union[ResourceTypes, str] + permission, # type: Union[AccountSasPermissions, str] + expiry, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): # type: (...) -> str + """Generates a shared access signature for the blob service. + + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param resource_types: + Specifies the resource types that are accessible with the account SAS. + :type resource_types: str or ~azure.storage.blob.ResourceTypes + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.AccountSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. 
+ For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_sas_token] + :end-before: [END create_sas_token] + :language: python + :dedent: 8 + :caption: Generating a shared access signature. + """ + sas = SharedAccessSignature(account_name, account_key) + return sas.generate_account( + services=Services(blob=True), + resource_types=resource_types, + permission=permission, + expiry=expiry, + start=start, + ip=ip, + **kwargs + ) # type: ignore + + +def generate_container_sas( + account_name, # type: str + container_name, # type: str + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] + permission=None, # type: Optional[Union[ContainerSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Any + """Generates a shared access signature for a container. + + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str container_name: + The name of the container. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + Either `account_key` or `user_delegation_key` must be specified. + :param ~azure.storage.blob.UserDelegationKey user_delegation_key: + Instead of an account shared key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.ContainerSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. 
+ :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START generate_sas_token] + :end-before: [END generate_sas_token] + :language: python + :dedent: 12 + :caption: Generating a sas token. + """ + if not user_delegation_key and not account_key: + raise ValueError("Either user_delegation_key or account_key must be provided.") + + if user_delegation_key: + sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) + else: + sas = BlobSharedAccessSignature(account_name, account_key=account_key) + return sas.generate_container( + container_name, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) + + +def generate_blob_sas( + account_name, # type: str + container_name, # type: str + blob_name, # type: str + snapshot=None, # type: Optional[str] + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] + permission=None, # type: Optional[Union[BlobSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Any + """Generates a shared access signature for a blob. + + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str container_name: + The name of the container. + :param str blob_name: + The name of the blob. + :param str snapshot: + An optional blob snapshot ID. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + Either `account_key` or `user_delegation_key` must be specified. 
+    :param ~azure.storage.blob.UserDelegationKey user_delegation_key:
+        Instead of an account shared key, the user could pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered read, write, delete, list.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.blob.BlobSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :param start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :type start: ~datetime.datetime or str
+    :param str policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`.
+    :param str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str version_id:
+        An optional blob version ID. This parameter only applies to versioning-enabled accounts.
+
+        .. versionadded:: 12.4.0
+            This keyword argument was introduced in API version '2019-12-12'.
+
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :return: A Shared Access Signature (sas) token.
+ :rtype: str + """ + if not user_delegation_key and not account_key: + raise ValueError("Either user_delegation_key or account_key must be provided.") + version_id = kwargs.pop('version_id', None) + if version_id and snapshot: + raise ValueError("snapshot and version_id cannot be set at the same time.") + if user_delegation_key: + sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) + else: + sas = BlobSharedAccessSignature(account_name, account_key=account_key) + return sas.generate_blob( + container_name, + blob_name, + snapshot=snapshot, + version_id=version_id, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_02_10/_upload_helpers.py new file mode 100644 index 0000000..bd59362 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_upload_helpers.py @@ -0,0 +1,291 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from io import SEEK_SET, UnsupportedOperation +from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import + +import six +from azure.core.exceptions import ResourceExistsError, ResourceModifiedError + +from ._shared.response_handlers import ( + process_storage_error, + return_response_headers) +from ._shared.models import StorageErrorCode +from ._shared.uploads import ( + upload_data_chunks, + upload_substream_blocks, + BlockBlobChunkUploader, + PageBlobChunkUploader, + AppendBlobChunkUploader) +from ._shared.encryption import generate_blob_encryption_data, encrypt_blob +from ._generated.models import ( + StorageErrorException, + BlockLookupList, + AppendPositionAccessConditions, + ModifiedAccessConditions, +) + +if TYPE_CHECKING: + from datetime import datetime # pylint: disable=unused-import + BlobLeaseClient = TypeVar("BlobLeaseClient") + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' 
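For orientation, the three module-level helpers above (generate_account_sas, generate_container_sas, generate_blob_sas) are the public entry points for minting SAS tokens in this vendored API version. The sketch below shows one plausible way to generate a read-only blob SAS and compose the blob URL; it assumes these names are re-exported from the package root as in azure-storage-blob, and the account, container, blob, and key values are placeholders, not part of the patch:

from datetime import datetime, timedelta

# Assumed import path: generate_blob_sas and BlobSasPermissions re-exported
# from the vendored package root, mirroring azure-storage-blob.
from azure.multiapi.storagev2.blob.v2020_02_10 import (
    BlobSasPermissions,
    generate_blob_sas,
)

# Placeholder account details for illustration only.
sas_token = generate_blob_sas(
    account_name="myaccount",
    container_name="mycontainer",
    blob_name="hello.txt",
    account_key="<account-key>",  # or pass user_delegation_key= instead
    permission=BlobSasPermissions(read=True),
    expiry=datetime.utcnow() + timedelta(hours=1),
)

# get_token() returns the bare query string, so append it after '?'.
blob_url = "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?" + sas_token

Passing user_delegation_key= instead of account_key= routes signing through the user-delegation branch of _BlobSharedAccessHelper.add_resource_signature shown earlier, which folds the signed OID/TID and key validity fields into the string to sign.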
+ + +def _convert_mod_error(error): + message = error.message.replace( + "The condition specified using HTTP conditional header(s) is not met.", + "The specified blob already exists.") + message = message.replace("ConditionNotMet", "BlobAlreadyExists") + overwrite_error = ResourceExistsError( + message=message, + response=error.response, + error=error) + overwrite_error.error_code = StorageErrorCode.blob_already_exists + raise overwrite_error + + +def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument + return any([ + modified_access_conditions.if_modified_since, + modified_access_conditions.if_unmodified_since, + modified_access_conditions.if_none_match, + modified_access_conditions.if_match + ]) + + +def upload_block_blob( # pylint: disable=too-many-locals + client=None, + data=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + adjusted_count = length + if (encryption_options.get('key') is not None) and (adjusted_count is not None): + adjusted_count += (16 - (length % 16)) + blob_headers = kwargs.pop('blob_headers', None) + tier = kwargs.pop('standard_blob_tier', None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + # Do single put if the size is smaller than or equal config.max_single_put_size + if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): + try: + data = data.read(length) + if not isinstance(data, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + except AttributeError: + pass + if encryption_options.get('key'): + encryption_data, data = encrypt_blob(data, encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + return client.upload( + data, + content_length=adjusted_count, + blob_http_headers=blob_headers, + headers=headers, + cls=return_response_headers, + validate_content=validate_content, + data_stream_total=adjusted_count, + upload_stream_current=0, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + **kwargs) + + use_original_upload_path = blob_settings.use_byte_buffer or \ + validate_content or encryption_options.get('required') or \ + blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + if encryption_options.get('key'): + cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + encryption_options['cek'] = cek + encryption_options['vector'] = iv + block_ids = upload_data_chunks( + service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + encryption_options=encryption_options, + **kwargs + ) + else: + block_ids = upload_substream_blocks( + service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + **kwargs + ) + + block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) + 
block_lookup.latest = block_ids + return client.commit_block_list( + block_lookup, + blob_http_headers=blob_headers, + cls=return_response_headers, + validate_content=validate_content, + headers=headers, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + **kwargs) + except StorageErrorException as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +def upload_page_blob( + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + if length is None or length < 0: + raise ValueError("A content length must be specified for a Page Blob.") + if length % 512 != 0: + raise ValueError("Invalid page blob size: {0}. " + "The size must be aligned to a 512-byte boundary.".format(length)) + if kwargs.get('premium_page_blob_tier'): + premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') + try: + headers['x-ms-access-tier'] = premium_page_blob_tier.value + except AttributeError: + headers['x-ms-access-tier'] = premium_page_blob_tier + if encryption_options and encryption_options.get('data'): + headers['x-ms-meta-encryptiondata'] = encryption_options['data'] + blob_tags_string = kwargs.pop('blob_tags_string', None) + + response = client.create( + content_length=0, + blob_content_length=length, + blob_sequence_number=None, + blob_http_headers=kwargs.pop('blob_headers', None), + blob_tags_string=blob_tags_string, + cls=return_response_headers, + headers=headers, + **kwargs) + if length == 0: + return response + + kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) + return upload_data_chunks( + service=client, + uploader_class=PageBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_page_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + encryption_options=encryption_options, + **kwargs) + + except StorageErrorException as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +def upload_append_blob( # pylint: disable=unused-argument + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if length == 0: + return {} + blob_headers = kwargs.pop('blob_headers', None) + append_conditions = AppendPositionAccessConditions( + max_size=kwargs.pop('maxsize_condition', None), + append_position=None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + try: + if overwrite: + client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + **kwargs) + except StorageErrorException as error: + if error.response.status_code != 404: + raise + # rewind the request body if it is a stream + if hasattr(stream, 'read'): + try: + # 
attempt to rewind the body to the initial position + stream.seek(0, SEEK_SET) + except UnsupportedOperation: + # if body is not seekable, then retry would not work + raise error + client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_version.py b/azure/multiapi/storagev2/blob/v2020_02_10/_version.py new file mode 100644 index 0000000..202620c --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/_version.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +VERSION = "12.6.0b1" diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/__init__.py new file mode 100644 index 0000000..247f39e --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/aio/__init__.py @@ -0,0 +1,137 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os + +from .._models import BlobType +from .._shared.policies_async import ExponentialRetry, LinearRetry +from ._blob_client_async import BlobClient +from ._container_client_async import ContainerClient +from ._blob_service_client_async import BlobServiceClient +from ._lease_async import BlobLeaseClient +from ._download_async import StorageStreamDownloader + + +async def upload_blob_to_url( + blob_url, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + credential=None, # type: Any + **kwargs): + # type: (...) -> dict[str, Any] + """Upload data to a given URL + + The data will be uploaded as a block blob. + + :param str blob_url: + The full URI to the blob. This can also include a SAS token. + :param data: + The data to upload. This can be bytes, text, an iterable or a file-like object. + :type data: bytes or str or Iterable + :param credential: + The credentials with which to authenticate. This is optional if the + blob URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + :keyword bool overwrite: + Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob_to_url will overwrite any existing data. If set to False, the + operation will fail with a ResourceExistsError. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. 
+ :keyword dict(str,str) metadata: + Name-value pairs associated with the blob as metadata. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword str encoding: + Encoding to use if text is supplied as input. Defaults to UTF-8. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict(str, Any) + """ + async with BlobClient.from_blob_url(blob_url, credential=credential) as client: + return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) + + +async def _download_to_stream(client, handle, **kwargs): + """Download data to specified open file-handle.""" + stream = await client.download_blob(**kwargs) + await stream.readinto(handle) + + +async def download_blob_from_url( + blob_url, # type: str + output, # type: str + credential=None, # type: Any + **kwargs): + # type: (...) -> None + """Download the contents of a blob to a local file or stream. + + :param str blob_url: + The full URI to the blob. This can also include a SAS token. + :param output: + Where the data should be downloaded to. This could be either a file path to write to, + or an open IO handle to write to. + :type output: str or writable stream + :param credential: + The credentials with which to authenticate. This is optional if the + blob URL already has a SAS token or the blob is public. The value can be a SAS token string, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + :keyword bool overwrite: + Whether the local file should be overwritten if it already exists. The default value is + `False` - in which case a ValueError will be raised if the file already exists. If set to + `True`, an attempt will be made to write to the existing file. If a stream handle is passed + in, this value is ignored. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :keyword int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
+ :rtype: None + """ + overwrite = kwargs.pop('overwrite', False) + async with BlobClient.from_blob_url(blob_url, credential=credential) as client: + if hasattr(output, 'write'): + await _download_to_stream(client, output, **kwargs) + else: + if not overwrite and os.path.isfile(output): + raise ValueError("The file '{}' already exists.".format(output)) + with open(output, 'wb') as file_handle: + await _download_to_stream(client, file_handle, **kwargs) + + +__all__ = [ + 'upload_blob_to_url', + 'download_blob_from_url', + 'BlobServiceClient', + 'ContainerClient', + 'BlobClient', + 'BlobLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'StorageStreamDownloader' +] diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_client_async.py new file mode 100644 index 0000000..3020da3 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_client_async.py @@ -0,0 +1,2335 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, invalid-overridden-method + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TYPE_CHECKING +) + +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.exceptions import ResourceNotFoundError + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin +from .._shared.policies_async import ExponentialRetry +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import get_page_ranges_result, parse_tags +from .._serialize import get_modify_conditions, get_api_version, get_access_conditions +from .._generated import VERSION +from .._generated.aio import AzureBlobStorage +from .._generated.models import StorageErrorException, CpkInfo +from .._deserialize import deserialize_blob_properties +from .._blob_client import BlobClient as BlobClientBase +from ._upload_helpers import ( + upload_block_blob, + upload_append_blob, + upload_page_blob) +from .._models import BlobType, BlobBlock, BlobProperties +from ._lease_async import BlobLeaseClient +from ._download_async import StorageStreamDownloader + + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ( # pylint: disable=unused-import + ContentSettings, + PremiumPageBlobTier, + StandardBlobTier, + SequenceNumberAction + ) + + +class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods + """A client to interact with a specific blob, although that blob may not yet exist. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the blob, + use the :func:`from_blob_url` classmethod. + :param container_name: The container name for the blob. + :type container_name: str + :param blob_name: The name of the blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob_name: str + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. 
This is optional if the + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_client] + :end-before: [END create_blob_client] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a URL to a public blob (no auth needed). + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_client_sas_url] + :end-before: [END create_blob_client_sas_url] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a SAS URL to a blob. + """ + def __init__( + self, account_url, # type: str + container_name, # type: str + blob_name, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(BlobClient, self).__init__( + account_url, + container_name=container_name, + blob_name=blob_name, + snapshot=snapshot, + credential=credential, + **kwargs) + self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + self._loop = kwargs.get('loop', None) + + @distributed_trace_async + async def get_account_information(self, **kwargs): # type: ignore + # type: (Optional[int]) -> Dict[str, str] + """Gets information related to the storage account in which the blob resides. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). 
+        :rtype: dict(str, str)
+        """
+        try:
+            return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def upload_blob(
+            self, data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
+            blob_type=BlobType.BlockBlob,  # type: Union[str, BlobType]
+            length=None,  # type: Optional[int]
+            metadata=None,  # type: Optional[Dict[str, str]]
+            **kwargs
+    ):
+        # type: (...) -> Any
+        """Creates a new blob from a data source with automatic chunking.
+
+        :param data: The blob data to upload.
+        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError. The exception to the above is with Append
+            blob types: if set to False and the data already exists, an error will not be raised
+            and the data will be appended to the existing blob. If overwrite is set to True, the existing
+            append blob will be deleted, and a new one created. Defaults to False.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            If specified, upload_blob only succeeds if the
+            blob's lease is active and matches this ID.
+            Required if the blob has an active lease.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value.
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world_async.py + :start-after: [START upload_a_blob] + :end-before: [END upload_a_blob] + :language: python + :dedent: 16 + :caption: Upload a blob to the container. 
+ """ + options = self._upload_blob_options( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + **kwargs) + if blob_type == BlobType.BlockBlob: + return await upload_block_blob(**options) + if blob_type == BlobType.PageBlob: + return await upload_page_blob(**options) + return await upload_append_blob(**options) + + @distributed_trace_async + async def download_blob(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. + + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. 
+ As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.blob.aio.StorageStreamDownloader + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world_async.py + :start-after: [START download_a_blob] + :end-before: [END download_a_blob] + :language: python + :dedent: 16 + :caption: Download a blob. + """ + options = self._download_blob_options( + offset=offset, + length=length, + **kwargs) + downloader = StorageStreamDownloader(**options) + await downloader._setup() # pylint: disable=protected-access + return downloader + + @distributed_trace_async + async def delete_blob(self, delete_snapshots=False, **kwargs): + # type: (str, Any) -> None + """Marks the specified blob for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob() + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob + and retains the blob for a specified number of days. + After the specified number of days, the blob's data is removed from the service during garbage collection. + Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` + option. Soft-deleted blob can be restored using :func:`undelete` operation. + + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blobs snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. If specified, delete_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world_async.py + :start-after: [START delete_blob] + :end-before: [END delete_blob] + :language: python + :dedent: 16 + :caption: Delete a blob. + """ + options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) + try: + await self._client.blob.delete(**options) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def undelete_blob(self, **kwargs): + # type: (Any) -> None + """Restores soft-deleted blobs or snapshots. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START undelete_blob] + :end-before: [END undelete_blob] + :language: python + :dedent: 12 + :caption: Undeleting a blob. + """ + try: + await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def exists(self, **kwargs): + # type: (**Any) -> bool + """ + Returns True if a blob exists with the defined parameters, and returns + False otherwise. + + :param str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to check if it exists. + :param int timeout: + The timeout parameter is expressed in seconds. + :returns: boolean + """ + try: + await self._client.blob.get_properties( + snapshot=self.snapshot, + **kwargs) + return True + except StorageErrorException as error: + try: + process_storage_error(error) + except ResourceNotFoundError: + return False + + @distributed_trace_async + async def get_blob_properties(self, **kwargs): + # type: (Any) -> BlobProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to get properties. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. 
+
+ @distributed_trace_async
+ async def get_blob_properties(self, **kwargs):
+ # type: (Any) -> BlobProperties
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the blob. It does not return the content of the blob.
+
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to get properties.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: BlobProperties
+ :rtype: ~azure.storage.blob.BlobProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common_async.py
+ :start-after: [START get_blob_properties]
+ :end-before: [END get_blob_properties]
+ :language: python
+ :dedent: 12
+ :caption: Getting the properties for a blob.
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+ try:
+ blob_props = await self._client.blob.get_properties(
+ timeout=kwargs.pop('timeout', None),
+ version_id=kwargs.pop('version_id', None),
+ snapshot=self.snapshot,
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ cls=kwargs.pop('cls', None) or deserialize_blob_properties,
+ cpk_info=cpk_info,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ blob_props.name = self.blob_name
+ if isinstance(blob_props, BlobProperties):
+ blob_props.container = self.container_name
+ blob_props.snapshot = self.snapshot
+ return blob_props # type: ignore
+
+ @distributed_trace_async
+ async def set_http_headers(self, content_settings=None, **kwargs):
+ # type: (Optional[ContentSettings], Any) -> Dict[str, Any]
+ """Sets system properties on the blob.
+
+ If one property is set for the content_settings, all properties will be overridden.
+
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Any] + """ + options = self._set_http_headers_options(content_settings=content_settings, **kwargs) + try: + return await self._client.blob.set_http_headers(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def set_blob_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Sets user-defined metadata for the blob as one or more name-value pairs. + + :param metadata: + Dict containing name and value pairs. Each call to this operation + replaces all existing metadata attached to the blob. To remove all + metadata from the blob, call this operation with no metadata headers. + :type metadata: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. 
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
+ """
+ options = self._set_blob_metadata_options(metadata=metadata, **kwargs)
+ try:
+ return await self._client.blob.set_metadata(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def create_page_blob( # type: ignore
+ self, size, # type: int
+ content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Creates a new Page Blob of the specified size.
+
+ :param int size:
+ This specifies the maximum size for the page blob, up to 1 TB.
+ The page blob size must be aligned to a 512-byte boundary.
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword int sequence_number:
+ Only for Page blobs. The sequence number is a user-controlled value that you can use to
+ track requests. The value of the sequence number must be between 0
+ and 2^63 - 1. The default value is 0.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_page_blob_options( + size, + content_settings=content_settings, + metadata=metadata, + premium_page_blob_tier=premium_page_blob_tier, + **kwargs) + try: + return await self._client.page_blob.create(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Creates a new Append Blob. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_append_blob_options( + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return await self._client.append_blob.create(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def create_snapshot(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Creates a snapshot of the blob. + + A snapshot is a read-only version of a blob that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a blob as it appears at a moment in time. + + A snapshot of a blob has the same name as the base blob from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. 
+ :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START create_blob_snapshot] + :end-before: [END create_blob_snapshot] + :language: python + :dedent: 12 + :caption: Create a snapshot of the blob. + """ + options = self._create_snapshot_options(metadata=metadata, **kwargs) + try: + return await self._client.blob.create_snapshot(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): + # type: (str, Optional[Dict[str, str]], bool, Any) -> Any + """Copies a blob asynchronously. + + This operation returns a copy operation + object that can be used to wait on the completion of the operation, + as well as check status or abort the copy operation. + The Blob service copies blobs on a best-effort basis. + + The source blob for a copy operation may be a block blob, an append blob, + or a page blob. If the destination blob already exists, it must be of the + same blob type as the source blob. Any existing destination blob will be + overwritten. The destination blob cannot be modified while a copy operation + is in progress. + + When copying from a page blob, the Blob service creates a destination page + blob of the source blob's length, initially containing all zeroes. Then + the source page ranges are enumerated, and non-empty ranges are copied. + + For a block blob or an append blob, the Blob service creates a committed + blob of zero length before returning from this operation. When copying + from a block blob, all committed blocks and their block IDs are copied. + Uncommitted blocks are not copied. At the end of the copy operation, the + destination blob will have the same committed block count as the source. + + When copying from an append blob, all committed blocks are copied. At the + end of the copy operation, the destination blob will have the same committed + block count as the source. + + For all blob types, you can call status() on the returned polling object + to check the status of the copy operation, or wait() to block until the + operation is complete. 
The final blob will be committed when the copy completes.
+
+ :param str source_url:
+ A URL of up to 2 KB in length that specifies a file or blob.
+ The value should be URL-encoded as it would appear in a request URI.
+ If the source is in another account, the source must either be public
+ or must be authenticated via a shared access signature. If the source
+ is public, no authentication is required.
+ Examples:
+ https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+ https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
+
+ https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+ :param metadata:
+ Name-value pairs associated with the blob as metadata. If no name-value
+ pairs are specified, the operation will copy the metadata from the
+ source blob or file to the destination blob. If one or more name-value
+ pairs are specified, the destination blob is created with the specified
+ metadata, and metadata is not copied from the source blob or file.
+ :type metadata: dict(str, str)
+ :param bool incremental_copy:
+ Copies the snapshot of the source page blob to a destination page blob.
+ The snapshot is copied such that only the differential changes between
+ the previously copied snapshot and the current snapshot are transferred to the destination.
+ The copied snapshots are complete copies of the original snapshot and
+ can be read or copied from as usual. Defaults to False.
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only if the source
+ blob has been modified since the specified date/time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only if the source blob
+ has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only
+ if the destination blob has been modified since the specified date/time.
+ If the destination blob has not been modified, the Blob service returns + status code 412 (Precondition Failed). + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has not been modified since the specified + date/time. If the destination blob has been modified, the Blob service + returns status code 412 (Precondition Failed). + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword source_lease: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword bool seal_destination_blob: + Seal the destination append blob. This operation is only for append blob. + + .. versionadded:: 12.4.0 + + :keyword bool requires_sync: + Enforces that the service will not return a response until the copy is complete. + :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START copy_blob_from_url] + :end-before: [END copy_blob_from_url] + :language: python + :dedent: 16 + :caption: Copy a blob from a URL. 
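Because the copy is carried out by the service in the background, callers typically poll the destination's copy status until it leaves 'pending'. A minimal polling sketch to complement the sample above (illustrative only: `client` is an assumed, already-authenticated async BlobClient for the destination blob and `src_url` an accessible source URL):

    import asyncio

    async def copy_and_wait(client, src_url, poll_seconds=1):
        # start_copy_from_url returns copy properties (copy_id, copy_status)
        # right away; the service performs the copy in the background.
        await client.start_copy_from_url(src_url)
        # Poll the destination blob until the copy leaves 'pending'.
        props = await client.get_blob_properties()
        while props.copy.status == 'pending':
            await asyncio.sleep(poll_seconds)
            props = await client.get_blob_properties()
        return props.copy.status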
+ """ + options = self._start_copy_from_url_options( + source_url=self._encode_source_url(source_url), + metadata=metadata, + incremental_copy=incremental_copy, + **kwargs) + try: + if incremental_copy: + return await self._client.page_blob.copy_incremental(**options) + return await self._client.blob.start_copy_from_url(**options) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination blob with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID, or an + instance of BlobProperties. + :type copy_id: str or ~azure.storage.blob.BlobProperties + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START abort_copy_blob_from_url] + :end-before: [END abort_copy_blob_from_url] + :language: python + :dedent: 16 + :caption: Abort copying a blob from URL. + """ + options = self._abort_copy_options(copy_id, **kwargs) + try: + await self._client.blob.abort_copy_from_url(**options) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): + # type: (int, Optional[str], Any) -> BlobLeaseClient + """Requests a new lease. + + If the blob does not have an active lease, the Blob + Service creates a lease on the blob and returns a new lease. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object. 
+ :rtype: ~azure.storage.blob.aio.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START acquire_lease_on_blob] + :end-before: [END acquire_lease_on_blob] + :language: python + :dedent: 12 + :caption: Acquiring a lease on a blob. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + await lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + @distributed_trace_async + async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): + # type: (Union[str, StandardBlobTier], Any) -> None + """This operation sets the tier on a block blob. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param standard_blob_tier: + Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', + 'Archive'. The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. + :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if standard_blob_tier is None: + raise ValueError("A StandardBlobTier must be specified") + try: + await self._client.blob.set_tier( + tier=standard_blob_tier, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + lease_access_conditions=access_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def stage_block( + self, block_id, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> None + """Creates a new block to be committed as part of a blob. + + :param str block_id: A valid Base64 string value that identifies the + block. Prior to encoding, the string must be less than or equal to 64 + bytes in size. For a given blob, the length of the value specified for + the block_id parameter must be the same size for each block. + :param data: The blob data. + :param int length: Size of the block. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. 
Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ options = self._stage_block_options(
+ block_id,
+ data,
+ length=length,
+ **kwargs)
+ try:
+ return await self._client.block_blob.stage_block(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
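stage_block and the commit_block_list operation documented below form the classic two-phase block upload: stage uniquely identified blocks first, then commit the list of IDs to materialize the blob. A minimal sketch (illustrative only: `client` is an assumed, already-authenticated async BlobClient and `chunks` an iterable of bytes; block IDs must be Base64 strings of uniform length):

    import base64
    import uuid

    from azure.storage.blob import BlobBlock

    async def upload_in_blocks(client, chunks):
        block_ids = []
        for chunk in chunks:
            # uuid4().hex keeps every Base64-encoded ID the same length,
            # which the service requires within a single blob.
            block_id = base64.b64encode(uuid.uuid4().hex.encode()).decode()
            await client.stage_block(block_id=block_id, data=chunk)
            block_ids.append(BlobBlock(block_id=block_id))
        # Staged blocks are invisible until the block list is committed.
        await client.commit_block_list(block_ids)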
+
+ @distributed_trace_async
+ async def stage_block_from_url(
+ self, block_id, # type: str
+ source_url, # type: str
+ source_offset=None, # type: Optional[int]
+ source_length=None, # type: Optional[int]
+ source_content_md5=None, # type: Optional[Union[bytes, bytearray]]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Creates a new block to be committed as part of a blob where
+ the contents are read from a URL.
+
+ :param str block_id: A valid Base64 string value that identifies the
+ block. Prior to encoding, the string must be less than or equal to 64
+ bytes in size. For a given blob, the length of the value specified for
+ the block_id parameter must be the same size for each block.
+ :param str source_url: The URL.
+ :param int source_offset:
+ Start of byte range to use for the block.
+ Must be set if source length is provided.
+ :param int source_length: The size of the block in bytes.
+ :param bytearray source_content_md5:
+ Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ options = self._stage_block_from_url_options(
+ block_id,
+ source_url=self._encode_source_url(source_url),
+ source_offset=source_offset,
+ source_length=source_length,
+ source_content_md5=source_content_md5,
+ **kwargs)
+ try:
+ return await self._client.block_blob.stage_block_from_url(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_block_list(self, block_list_type="committed", **kwargs):
+ # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]]
+ """The Get Block List operation retrieves the list of blocks that have
+ been uploaded as part of a block blob.
+
+ :param str block_list_type:
+ Specifies whether to return the list of committed
+ blocks, the list of uncommitted blocks, or both lists together.
+ Possible values include: 'committed', 'uncommitted', 'all'
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A tuple of two lists - committed and uncommitted blocks
+ :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ blocks = await self._client.block_blob.get_block_list(
+ list_type=block_list_type,
+ snapshot=self.snapshot,
+ timeout=kwargs.pop('timeout', None),
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return self._get_block_list_result(blocks)
+
+ @distributed_trace_async
+ async def commit_block_list( # type: ignore
+ self, block_list, # type: List[BlobBlock]
+ content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """The Commit Block List operation writes a blob by specifying the list of
+ block IDs that make up the blob.
+
+ :param list block_list:
+ List of BlobBlock objects.
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict[str, str]
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._commit_block_list_options( + block_list, + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return await self._client.block_blob.commit_block_list(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): + # type: (Union[str, PremiumPageBlobTier], **Any) -> None + """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. + + :param premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. 
This is only applicable to page blobs on
+ premium storage accounts.
+ :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :rtype: None
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if premium_page_blob_tier is None:
+ raise ValueError("A PremiumPageBlobTier must be specified")
+ try:
+ await self._client.blob.set_tier(
+ tier=premium_page_blob_tier,
+ timeout=kwargs.pop('timeout', None),
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_blob_tags(self, tags=None, **kwargs):
+ # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
+ Each call to this operation replaces all existing tags attached to the blob. To remove all
+ tags from the blob, call this operation with no tags set.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :param tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+ :type tags: dict(str, str)
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to set tags on.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the tags content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
+ """
+ options = self._set_blob_tags_options(tags=tags, **kwargs)
+ try:
+ return await self._client.blob.set_tags(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
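Setting and reading tags are symmetric: set_blob_tags replaces the full tag set, and the Get Tags operation documented next returns it. A minimal round-trip sketch (illustrative only: `client` is an assumed, already-authenticated async BlobClient against API version '2019-12-12' or later):

    async def tag_round_trip(client):
        # Replaces any existing tags on the blob.
        await client.set_blob_tags({"project": "demo", "stage": "test"})
        tags = await client.get_blob_tags()
        print(tags)  # -> {'project': 'demo', 'stage': 'test'}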
+
+ @distributed_trace_async
+ async def get_blob_tags(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to get tags for.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Key value pairs of blob tags.
+ :rtype: Dict[str, str]
+ """
+ options = self._get_blob_tags_options(**kwargs)
+ try:
+ _, tags = await self._client.blob.get_tags(**options)
+ return parse_tags(tags) # pylint: disable=protected-access
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_page_ranges( # type: ignore
+ self, offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a Page Blob or snapshot
+ of a page blob.
+
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param str previous_snapshot_diff:
+ The snapshot diff parameter that contains an opaque DateTime value that
+ specifies a previous blob snapshot to be compared
+ against a more recent snapshot or the current blob.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges, the second element is the list of
+ cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ previous_snapshot_diff=previous_snapshot_diff,
+ **kwargs)
+ try:
+ if previous_snapshot_diff:
+ ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+ else:
+ ranges = await self._client.page_blob.get_page_ranges(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
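A minimal sketch of consuming the two lists returned by get_page_ranges (illustrative only: `client` is an assumed, already-authenticated async BlobClient for an existing page blob):

    async def print_page_ranges(client):
        # Each range is a dict with 'start' and 'end' byte offsets.
        filled, cleared = await client.get_page_ranges()
        for r in filled:
            print("filled  %d-%d" % (r['start'], r['end']))
        for r in cleared:
            print("cleared %d-%d" % (r['start'], r['end']))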
+
+ @distributed_trace_async
+ async def get_page_range_diff_for_managed_disk(
+ self, previous_snapshot_url, # type: str
+ offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a managed disk or snapshot.
+
+ .. note::
+ This operation is only available for managed disk accounts.
+
+ .. versionadded:: 12.2.0
+ This operation was introduced in API version '2019-07-07'.
+
+ :param str previous_snapshot_url:
+ Specifies the URL of a previous snapshot of the managed disk.
+ The response will only contain pages that were changed between the target blob and
+ its previous snapshot.
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges, the second element is the list of
+ cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ prev_snapshot_url=previous_snapshot_url,
+ **kwargs)
+ try:
+ ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ @distributed_trace_async
+ async def set_sequence_number( # type: ignore
+ self, sequence_number_action, # type: Union[str, SequenceNumberAction]
+ sequence_number=None, # type: Optional[str]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets the blob sequence number.
+
+ :param str sequence_number_action:
+ This property indicates how the service should modify the blob's sequence
+ number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+ :param str sequence_number:
+ This property sets the blob's sequence number. The sequence number is a
+ user-controlled property that you can use to track requests and manage
+ concurrency issues.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
+ """
+ options = self._set_sequence_number_options(
+ sequence_number_action, sequence_number=sequence_number, **kwargs)
+ try:
+ return await self._client.page_blob.update_sequence_number(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def resize_blob(self, size, **kwargs):
+ # type: (int, Any) -> Dict[str, Union[str, datetime]]
+ """Resizes a page blob to the specified size.
+
+ If the specified value is less than the current size of the blob,
+ then all pages above the specified value are cleared.
+
+ :param int size:
+ Size used to resize blob.
Maximum size for a page blob is up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._resize_blob_options(size, **kwargs) + try: + return await self._client.page_blob.resize(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_page( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """The Upload Pages operation writes a range of pages to a page blob. + + :param bytes page: + Content of the page. + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. 
Note that this MD5 hash is not stored with the + blob. + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._upload_page_options( + page=page, + offset=offset, + length=length, + **kwargs) + try: + return await self._client.page_blob.upload_pages(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_pages_from_url(self, source_url, # type: str + offset, # type: int + length, # type: int + source_offset, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """ + The Upload Pages operation writes a range of pages to a page blob where + the contents are read from a URL. + + :param str source_url: + The URL of the source data. It can point to any Azure Blob or File, that is either public or has a + shared access signature attached. + :param int offset: + Start of byte range to use for writing to a section of the blob. 
+ Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int source_offset: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + The service will read the same number of bytes as the destination range (length-offset). + :keyword bytes source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. 
+ :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + """ + + options = self._upload_pages_from_url_options( + source_url=self._encode_source_url(source_url), + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def clear_page(self, offset, length, **kwargs): + # type: (int, int, Any) -> Dict[str, Union[str, datetime]] + """Clears a range of pages. + + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. 
+ :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._clear_page_options(offset, length, **kwargs) + try: + return await self._client.page_blob.clear_pages(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def append_block( # type: ignore + self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Commits a new block of data to the end of the existing append blob. + + :param data: + Content of the block. + :param int length: + Size of the block in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). + :rtype: dict(str, Any) + """ + options = self._append_block_options( + data, + length=length, + **kwargs + ) + try: + return await self._client.append_blob.append_block(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async() + async def append_block_from_url(self, copy_source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """ + Creates a new block to be committed as part of a blob, where the contents are read from a source url. + + :param str copy_source_url: + The URL of the source data. It can point to any Azure Blob or File, that is either public or has a + shared access signature attached. + :param int source_offset: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + :param int source_length: + This indicates the end of the range of bytes that has to be taken from the copy source. + :keyword bytearray source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. 
If it + is not, the request will fail with the + AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. 
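+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
+
+        A minimal usage sketch (the account URLs, container/blob names and SAS tokens are
+        placeholders, and the vendored import path is assumed from this package's layout):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient
+
+            async def main():
+                blob = BlobClient.from_blob_url(
+                    "https://myaccount.blob.core.windows.net/mycontainer/myappendblob?<sas>")
+                async with blob:
+                    # Append the first 512 bytes of the source blob to this append blob.
+                    result = await blob.append_block_from_url(
+                        "https://sourceaccount.blob.core.windows.net/src/srcblob?<sas>",
+                        source_offset=0,
+                        source_length=512)
+                    print(result.get("blob_append_offset"))
+
+            asyncio.run(main())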
+ """ + options = self._append_block_from_url_options( + copy_source_url=self._encode_source_url(copy_source_url), + source_offset=source_offset, + source_length=source_length, + **kwargs + ) + try: + return await self._client.append_blob.append_block_from_url(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async() + async def seal_append_blob(self, **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """The Seal operation seals the Append Blob to make it read-only. + + .. versionadded:: 12.4.0 + + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). + :rtype: dict(str, Any) + """ + options = self._seal_append_blob_options(**kwargs) + try: + return await self._client.append_blob.seal(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_service_client_async.py new file mode 100644 index 0000000..8642282 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_service_client_async.py @@ -0,0 +1,641 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, + TYPE_CHECKING +) + +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import AsyncPipeline +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.async_paging import AsyncItemPaged + +from .._shared.models import LocationMode +from .._shared.policies_async import ExponentialRetry +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._shared.parser import _to_utc_datetime +from .._shared.response_handlers import parse_to_internal_user_delegation_key +from .._generated import VERSION +from .._generated.aio import AzureBlobStorage +from .._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo +from .._blob_service_client import BlobServiceClient as BlobServiceClientBase +from ._container_client_async import ContainerClient +from ._blob_client_async import BlobClient +from .._models import ContainerProperties +from .._deserialize import service_stats_deserialize, service_properties_deserialize +from .._serialize import get_api_version +from ._models import ContainerPropertiesPaged, FilteredBlobPaged + +if TYPE_CHECKING: + from datetime import datetime + from azure.core.pipeline.transport import HttpTransport + from azure.core.pipeline.policies import HTTPPolicy + from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey + from ._lease_async import BlobLeaseClient + from .._models import ( + BlobProperties, + PublicAccess, + BlobAnalyticsLogging, + Metrics, + CorsRule, + RetentionPolicy, + StaticWebsite, + ) + + +class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase): + """A client to interact with the Blob Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete containers within the account. + For operations relating to a specific container or blob, clients for those entities + can also be retrieved using the `get_client` functions. + + :param str account_url: + The URL to the blob storage account. Any other entities included + in the URL path (e.g. container or blob) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. 
If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_service_client] + :end-before: [END create_blob_service_client] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient with account url and credential. + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_service_client_oauth] + :end-before: [END create_blob_service_client_oauth] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient with Azure Identity credentials. + """ + + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(BlobServiceClient, self).__init__( + account_url, + credential=credential, + **kwargs) + self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + self._loop = kwargs.get('loop', None) + + @distributed_trace_async + async def get_user_delegation_key(self, key_start_time, # type: datetime + key_expiry_time, # type: datetime + **kwargs # type: Any + ): + # type: (...) -> UserDelegationKey + """ + Obtain a user delegation key for the purpose of signing SAS tokens. + A token credential must be present on the service object for this request to succeed. + + :param ~datetime.datetime key_start_time: + A DateTime value. Indicates when the key becomes valid. + :param ~datetime.datetime key_expiry_time: + A DateTime value. Indicates when the key stops being valid. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The user delegation key. + :rtype: ~azure.storage.blob.UserDelegationKey + """ + key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) + timeout = kwargs.pop('timeout', None) + try: + user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info, + timeout=timeout, + **kwargs) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore + + @distributed_trace_async + async def get_account_information(self, **kwargs): + # type: (Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. 
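+
+        For example, a minimal sketch (the connection string is a placeholder and the
+        vendored import path is assumed):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient
+
+            async def main():
+                service = BlobServiceClient.from_connection_string("<connection_string>")
+                async with service:
+                    info = await service.get_account_information()
+                    print(info["sku_name"], info["account_kind"])
+
+            asyncio.run(main())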
+ + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START get_blob_service_account_info] + :end-before: [END get_blob_service_account_info] + :language: python + :dedent: 12 + :caption: Getting account information for the blob service. + """ + try: + return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def get_service_stats(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Retrieves statistics related to replication for the Blob service. + + It is only available when read-access geo-redundant replication is enabled for + the storage account. + + With geo-redundant replication, Azure Storage maintains your data durable + in two locations. In both locations, Azure Storage constantly maintains + multiple healthy replicas of your data. The location where you read, + create, update, or delete data is the primary storage account location. + The primary location exists in the region you choose at the time you + create an account via the Azure Management Azure classic portal, for + example, North Central US. The location to which your data is replicated + is the secondary location. The secondary location is automatically + determined based on the location of the primary; it is in a second data + center that resides in the same region as the primary location. Read-only + access is available from the secondary location, if read-access geo-redundant + replication is enabled for your storage account. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The blob service stats. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START get_blob_service_stats] + :end-before: [END get_blob_service_stats] + :language: python + :dedent: 12 + :caption: Getting service stats for the blob service. + """ + timeout = kwargs.pop('timeout', None) + try: + stats = await self._client.service.get_statistics( # type: ignore + timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) + return service_stats_deserialize(stats) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def get_service_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the properties of a storage account's Blob service, including + Azure Storage Analytics. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An object containing blob service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START get_blob_service_properties] + :end-before: [END get_blob_service_properties] + :language: python + :dedent: 12 + :caption: Getting service properties for the blob service. 
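+
+        A minimal sketch of reading the current properties (the connection string is a
+        placeholder; the listed keys follow the parameters documented on set_service_properties):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient
+
+            async def main():
+                service = BlobServiceClient.from_connection_string("<connection_string>")
+                async with service:
+                    props = await service.get_service_properties()
+                    # Expect keys such as 'analytics_logging', 'hour_metrics',
+                    # 'minute_metrics', 'cors' and 'delete_retention_policy'.
+                    print(sorted(props.keys()))
+
+            asyncio.run(main())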
+ """ + timeout = kwargs.pop('timeout', None) + try: + service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def set_service_properties( + self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] + hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + target_version=None, # type: Optional[str] + delete_retention_policy=None, # type: Optional[RetentionPolicy] + static_website=None, # type: Optional[StaticWebsite] + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's Blob service, including + Azure Storage Analytics. + + If an element (e.g. analytics_logging) is left as None, the + existing settings on the service for that functionality are preserved. + + :param analytics_logging: + Groups the Azure Analytics Logging settings. + :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for blobs. + :type hour_metrics: ~azure.storage.blob.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for blobs. + :type minute_metrics: ~azure.storage.blob.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list[~azure.storage.blob.CorsRule] + :param str target_version: + Indicates the default version to use for requests if an incoming + request's version is not specified. + :param delete_retention_policy: + The delete retention policy specifies whether to retain deleted blobs. + It also specifies the number of days and versions of blob to keep. + :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy + :param static_website: + Specifies whether the static website feature is enabled, + and if yes, indicates the index document and 404 error document to use. + :type static_website: ~azure.storage.blob.StaticWebsite + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START set_blob_service_properties] + :end-before: [END set_blob_service_properties] + :language: python + :dedent: 12 + :caption: Setting service properties for the blob service. + """ + props = StorageServiceProperties( + logging=analytics_logging, + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors, + default_service_version=target_version, + delete_retention_policy=delete_retention_policy, + static_website=static_website + ) + timeout = kwargs.pop('timeout', None) + try: + await self._client.service.set_properties(props, timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_containers( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> AsyncItemPaged[ContainerProperties] + """Returns a generator to list the containers under the specified account. 
+ + The generator will lazily follow the continuation tokens returned by + the service and stop when all containers have been returned. + + :param str name_starts_with: + Filters the results to return only containers whose names + begin with the specified prefix. + :param bool include_metadata: + Specifies that container metadata be returned in the response. + The default value is `False`. + :keyword bool include_deleted: + Specifies that deleted containers be returned in the response. This is only for accounts + with container restore enabled. The default value is `False`. + .. versionadded:: 12.4.0 + :keyword int results_per_page: + The maximum number of container names to retrieve per API + call. If the request does not specify, the server will return up to 5,000 items. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) of ContainerProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_list_containers] + :end-before: [END bsc_list_containers] + :language: python + :dedent: 16 + :caption: Listing the containers in the blob service. + """ + include = ['metadata'] if include_metadata else [] + include_deleted = kwargs.pop('include_deleted', None) + if include_deleted: + include.append("deleted") + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.service.list_containers_segment, + prefix=name_starts_with, + include=include, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=ContainerPropertiesPaged + ) + + @distributed_trace + def find_blobs_by_tags(self, filter_expression, **kwargs): + # type: (str, **Any) -> AsyncItemPaged[FilteredBlob] + """The Filter Blobs operation enables callers to list blobs across all + containers whose tags match a given search expression. Filter blobs + searches across all containers within a storage account but can be + scoped within the expression to a single container. + + :param str filter_expression: + The expression to find blobs whose tags match the specified condition. + eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" + To specify a container, eg. "@container='containerName' and \"Name\"='C'" + :keyword int results_per_page: + The maximum number of results per page when paginating. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of FilteredBlob. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob] + """ + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.service.filter_blobs, + where=filter_expression, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=FilteredBlobPaged) + + @distributed_trace_async + async def create_container( + self, name, # type: str + metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[Union[PublicAccess, str]] + **kwargs + ): + # type: (...) -> ContainerClient + """Creates a new container under the specified account. + + If the container with the same name already exists, a ResourceExistsError will + be raised.
This method returns a client with which to interact with the newly + created container. + + :param str name: The name of the container to create. + :param metadata: + A dict with name-value pairs to associate with the + container as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + Possible values include: 'container', 'blob'. + :type public_access: str or ~azure.storage.blob.PublicAccess + :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.blob.aio.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_create_container] + :end-before: [END bsc_create_container] + :language: python + :dedent: 16 + :caption: Creating a container in the blob service. + """ + container = self.get_container_client(name) + timeout = kwargs.pop('timeout', None) + kwargs.setdefault('merge_span', True) + await container.create_container( + metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) + return container + + @distributed_trace_async + async def delete_container( + self, container, # type: Union[ContainerProperties, str] + lease=None, # type: Optional[Union[BlobLeaseClient, str]] + **kwargs + ): + # type: (...) -> None + """Marks the specified container for deletion. + + The container and any blobs contained within it are later deleted during garbage collection. + If the container is not found, a ResourceNotFoundError will be raised. + + :param container: + The container to delete. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_delete_container] + :end-before: [END bsc_delete_container] + :language: python + :dedent: 16 + :caption: Deleting a container in the blob service. + """ + container = self.get_container_client(container) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await container.delete_container( # type: ignore + lease=lease, + timeout=timeout, + **kwargs) + + @distributed_trace_async + async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): + # type: (str, str, **Any) -> ContainerClient + """Restores a soft-deleted container. + + The operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :param str deleted_container_name: + Specifies the name of the deleted container to restore. + :param str deleted_container_version: + Specifies the version of the deleted container to restore. + :keyword str new_name: + The new name for the deleted container to be restored to. + If not specified, deleted_container_name will be used as the restored container name. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.blob.aio.ContainerClient + """ + new_name = kwargs.pop('new_name', None) + container = self.get_container_client(new_name or deleted_container_name) + try: + await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access + deleted_container_version=deleted_container_version, + timeout=kwargs.pop('timeout', None), **kwargs) + return container + except StorageErrorException as error: + process_storage_error(error) + + def get_container_client(self, container): + # type: (Union[ContainerProperties, str]) -> ContainerClient + """Get a client to interact with the specified container. + + The container need not already exist. + + :param container: + The container. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :returns: A ContainerClient. + :rtype: ~azure.storage.blob.aio.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_get_container_client] + :end-before: [END bsc_get_container_client] + :language: python + :dedent: 12 + :caption: Getting the container client to interact with a specific container.
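+
+        A minimal sketch (the connection string and container name are placeholders):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient
+
+            async def main():
+                service = BlobServiceClient.from_connection_string("<connection_string>")
+                async with service:
+                    container_client = service.get_container_client("mycontainer")
+                    # No request is sent here; the container need not exist yet.
+                    print(container_client.container_name)
+
+            asyncio.run(main())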
+ """ + try: + container_name = container.name + except AttributeError: + container_name = container + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ContainerClient( + self.url, container_name=container_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function, loop=self._loop) + + def get_blob_client( + self, container, # type: Union[ContainerProperties, str] + blob, # type: Union[BlobProperties, str] + snapshot=None # type: Optional[Union[Dict[str, Any], str]] + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param container: + The container that the blob is in. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param blob: + The blob with which to interact. This can either be the name of the blob, + or an instance of BlobProperties. + :type blob: str or ~azure.storage.blob.BlobProperties + :param snapshot: + The optional blob snapshot on which to operate. This can either be the ID of the snapshot, + or a dictionary output returned by + :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. + :type snapshot: str or dict(str, Any) + :returns: A BlobClient. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_get_blob_client] + :end-before: [END bsc_get_blob_client] + :language: python + :dedent: 16 + :caption: Getting the blob client to interact with a specific blob. + """ + try: + container_name = container.name + except AttributeError: + container_name = container + + try: + blob_name = blob.name + except AttributeError: + blob_name = blob + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( # type: ignore + self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_container_client_async.py new file mode 100644 index 0000000..730a1fd --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_container_client_async.py @@ -0,0 +1,1121 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator, + TYPE_CHECKING +) + +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.async_paging import AsyncItemPaged +from azure.core.pipeline import AsyncPipeline +from azure.core.pipeline.transport import AsyncHttpResponse + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.request_handlers import add_metadata_headers, serialize_iso +from .._shared.response_handlers import ( + process_storage_error, + return_response_headers, + return_headers_and_deserialized) +from .._generated import VERSION +from .._generated.aio import AzureBlobStorage +from .._generated.models import ( + StorageErrorException, + SignedIdentifier) +from .._deserialize import deserialize_container_properties +from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions +from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name +from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import +from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix +from ._lease_async import BlobLeaseClient +from ._blob_client_async import BlobClient + +if TYPE_CHECKING: + from .._models import PublicAccess + from ._download_async import StorageStreamDownloader + from datetime import datetime + from .._models import ( # pylint: disable=unused-import + AccessPolicy, + StandardBlobTier, + PremiumPageBlobTier) + + +class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): + """A client to interact with a specific container, although that container + may not yet exist. + + For operations relating to a specific blob within this container, a blob client can be + retrieved using the :func:`~get_blob_client` function. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the container, + use the :func:`from_container_url` classmethod. + :param container_name: + The name of the container for the blob. + :type container_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the URL already has a SAS token, specifying an explicit credential will take priority. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. 
+ :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START create_container_client_from_service] + :end-before: [END create_container_client_from_service] + :language: python + :dedent: 8 + :caption: Get a ContainerClient from an existing BlobServiceClient. + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START create_container_client_sasurl] + :end-before: [END create_container_client_sasurl] + :language: python + :dedent: 12 + :caption: Creating the container client directly. + """ + def __init__( + self, account_url, # type: str + container_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(ContainerClient, self).__init__( + account_url, + container_name=container_name, + credential=credential, + **kwargs) + self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + self._loop = kwargs.get('loop', None) + + @distributed_trace_async + async def create_container(self, metadata=None, public_access=None, **kwargs): + # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None + """ + Creates a new container under the specified account. If the container + with the same name already exists, the operation fails. + + :param metadata: + A dict with name_value pairs to associate with the + container as metadata. Example:{'Category':'test'} + :type metadata: dict[str, str] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START create_container] + :end-before: [END create_container] + :language: python + :dedent: 16 + :caption: Creating a container to store blobs. 
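As a sketch of the call documented above, creation can be made idempotent by catching the mapped 409 error; ``container_client`` is assumed to be an aio ContainerClient constructed as in the class docstring:

    from azure.core.exceptions import ResourceExistsError

    async def ensure_container(container_client):
        # create_container raises ResourceExistsError when the name is taken;
        # swallowing it makes the call safe to repeat.
        try:
            await container_client.create_container(metadata={"category": "test"})
        except ResourceExistsError:
            pass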
+ """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + timeout = kwargs.pop('timeout', None) + container_cpk_scope_info = get_container_cpk_scope_info(kwargs) + try: + return await self._client.container.create( # type: ignore + timeout=timeout, + access=public_access, + container_cpk_scope_info=container_cpk_scope_info, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def delete_container( + self, **kwargs): + # type: (Any) -> None + """ + Marks the specified container for deletion. The container and any blobs + contained within it are later deleted during garbage collection. + + :keyword lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START delete_container] + :end-before: [END delete_container] + :language: python + :dedent: 16 + :caption: Delete a container. + """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + await self._client.container.delete( + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs): + # type: (...) -> BlobLeaseClient + """ + Requests a new lease. If the container does not have an active lease, + the Blob service creates a lease on the container and returns a new + lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. 
The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.blob.aio.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START acquire_lease_on_container] + :end-before: [END acquire_lease_on_container] + :language: python + :dedent: 12 + :caption: Acquiring a lease on the container. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) + return lease + + @distributed_trace_async + async def get_account_information(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def get_container_properties(self, **kwargs): + # type: (**Any) -> ContainerProperties + """Returns all user-defined metadata and system properties for the specified + container. The data returned does not include the container's list of blobs. + + :keyword lease: + If specified, get_container_properties only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Properties for the specified container within a container object. + :rtype: ~azure.storage.blob.ContainerProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_container_properties] + :end-before: [END get_container_properties] + :language: python + :dedent: 16 + :caption: Getting properties on the container. 
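A short sketch combining the lease and property calls documented above (``container_client`` is assumed to be an aio ContainerClient; the lease client's release() mirrors acquire()):

    async def inspect_under_lease(container_client):
        # 15 seconds is the shortest non-infinite lease duration the service allows.
        lease = await container_client.acquire_lease(lease_duration=15)
        try:
            props = await container_client.get_container_properties(lease=lease)
            print(props.name, props.last_modified, props.etag)
        finally:
            await lease.release()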
+ """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response = await self._client.container.get_properties( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=deserialize_container_properties, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + response.name = self.container_name + return response # type: ignore + + @distributed_trace_async + async def set_container_metadata( # type: ignore + self, metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Sets one or more user-defined name-value pairs for the specified + container. Each call to this operation replaces all existing metadata + attached to the container. To remove all metadata from the container, + call this operation with no metadata dict. + + :param metadata: + A dict containing name-value pairs to associate with the container as + metadata. Example: {'category':'test'} + :type metadata: dict[str, str] + :keyword lease: + If specified, set_container_metadata only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Container-updated property dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START set_container_metadata] + :end-before: [END set_container_metadata] + :language: python + :dedent: 16 + :caption: Setting metadata on the container. + """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + return await self._client.container.set_metadata( # type: ignore + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def get_container_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the specified container. + The permissions indicate whether container data may be accessed publicly. + + :keyword lease: + If specified, get_container_access_policy only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_container_access_policy] + :end-before: [END get_container_access_policy] + :language: python + :dedent: 16 + :caption: Getting the access policy on the container. 
+ """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = await self._client.container.get_access_policy( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=return_headers_and_deserialized, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return { + 'public_access': response.get('blob_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace_async + async def set_container_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs # type: Any + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified container or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether blobs in a container may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the container. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. + :keyword ~datetime.datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Container-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START set_container_access_policy] + :end-before: [END set_container_access_policy] + :language: python + :dedent: 16 + :caption: Setting access policy on the container. + """ + timeout = kwargs.pop('timeout', None) + lease = kwargs.pop('lease', None) + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. 
The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore + signed_identifiers = identifiers # type: ignore + + mod_conditions = get_modify_conditions(kwargs) + access_conditions = get_access_conditions(lease) + try: + return await self._client.container.set_access_policy( + container_acl=signed_identifiers or None, + timeout=timeout, + access=public_access, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_blobs(self, name_starts_with=None, include=None, **kwargs): + # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] or str include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START list_blobs_in_container] + :end-before: [END list_blobs_in_container] + :language: python + :dedent: 12 + :caption: List the blobs in the container. + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_flat_segment, + include=include, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=BlobPropertiesPaged + ) + + @distributed_trace + def walk_blobs( + self, name_starts_with=None, # type: Optional[str] + include=None, # type: Optional[Any] + delimiter="/", # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> AsyncItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. This operation will list blobs in accordance with a hierarchy, + as delimited by the specified delimiter character. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
+ :param str delimiter: + When the request includes this parameter, the operation returns a BlobPrefix + element in the response body that acts as a placeholder for all blobs whose + names begin with the same substring up to the appearance of the delimiter + character. The delimiter may be a single character or a string. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_hierarchy_segment, + delimiter=delimiter, + include=include, + timeout=timeout, + **kwargs) + return BlobPrefix( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + delimiter=delimiter) + + @distributed_trace_async + async def upload_blob( + self, name, # type: Union[str, BlobProperties] + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> BlobClient + """Creates a new blob from a data source with automatic chunking. + + :param name: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type name: str or ~azure.storage.blob.BlobProperties + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If set overwrite=True, then the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. 
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :returns: A BlobClient to interact with the newly uploaded blob. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START upload_blob_to_container] + :end-before: [END upload_blob_to_container] + :language: python + :dedent: 12 + :caption: Upload blob to the container. + """ + blob = self.get_blob_client(name) + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + await blob.upload_blob( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + timeout=timeout, + encoding=encoding, + **kwargs + ) + return blob + + @distributed_trace_async + async def delete_blob( + self, blob, # type: Union[str, BlobProperties] + delete_snapshots=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> None + """Marks the specified blob or snapshot for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot + and retains the blob or snapshot for specified number of days. + After specified number of days, blob's data is removed from the service during garbage collection. + Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` + option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blobs snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. Value can be a Lease object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. 
versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + blob = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await blob.delete_blob( # type: ignore + delete_snapshots=delete_snapshots, + timeout=timeout, + **kwargs) + + @distributed_trace_async + async def download_blob(self, blob, offset=None, length=None, **kwargs): + # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. 
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :keyword str encoding:
+            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds. This method may make
+            multiple calls to the Azure service and the timeout will apply to
+            each call individually.
+        :returns: A streaming object. (StorageStreamDownloader)
+        :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
+        """
+        blob_client = self.get_blob_client(blob) # type: ignore
+        kwargs.setdefault('merge_span', True)
+        return await blob_client.download_blob(
+            offset=offset,
+            length=length,
+            **kwargs)
+
+    @distributed_trace_async
+    async def delete_blobs(  # pylint: disable=arguments-differ
+        self, *blobs: List[Union[str, BlobProperties, dict]],
+        **kwargs
+    ) -> AsyncIterator[AsyncHttpResponse]:
+        """Marks the specified blobs or snapshots for deletion.
+
+        The blobs are later deleted during garbage collection.
+        Note that in order to delete blobs, you must delete all of their
+        snapshots. You can delete both at the same time with the delete_blobs operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
+        and retains the blobs or snapshots for the specified number of days.
+        After the specified number of days, the blobs' data is removed from the service during garbage collection.
+        Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]`.
+        Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()`.
+
+        :param blobs:
+            The blobs to delete. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When the blob type is dict, here's a list of keys, value rules.
+
+                blob name:
+                    key: 'name', value type: str
+                snapshot you want to delete:
+                    key: 'snapshot', value type: str
+                whether to delete snapshots when deleting the blob:
+                    key: 'delete_snapshots', value: 'include' or 'only'
+                whether the blob has been modified or not:
+                    key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+                etag:
+                    key: 'etag', value type: str
+                match the etag or not:
+                    key: 'match_condition', value type: MatchConditions
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+        :keyword str delete_snapshots:
+            Required if a blob has associated snapshots. Values include:
+             - "only": Deletes only the blob's snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword bool raise_on_any_failure: + This is a boolean param which defaults to True. When this is set, an exception + is raised even if there is a single operation failure. For optimal performance, + this should be set to False + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: An async iterator of responses, one for each blob in order + :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START delete_multiple_blobs] + :end-before: [END delete_multiple_blobs] + :language: python + :dedent: 12 + :caption: Deleting multiple blobs. + """ + if len(blobs) == 0: + return iter(list()) + + reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) + + return await self._batch_send(*reqs, **options) + + @distributed_trace + async def set_standard_blob_tier_blobs( + self, + standard_blob_tier: Union[str, 'StandardBlobTier'], + *blobs: List[Union[str, BlobProperties, dict]], + **kwargs + ) -> AsyncIterator[AsyncHttpResponse]: + """This operation sets the tier on block blobs. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param standard_blob_tier: + Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', + 'Archive'. The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. + + .. note:: + If you want to set different tier on different blobs please set this positional parameter to None. + Then the blob tier on every BlobProperties will be taken. + + :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier + :param blobs: + The blobs with which to interact. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys, value rules. + blob name: + key: 'name', value type: str + standard blob tier: + key: 'blob_tier', value type: StandardBlobTier + rehydrate priority: + key: 'rehydrate_priority', value type: RehydratePriority + lease: + key: 'lease_id', value type: Union[str, LeaseClient] + tags match condition: + key: 'if_tags_match_condition', value type: str + timeout for subrequest: + key: 'timeout', value type: int + + :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :keyword bool raise_on_any_failure: + This is a boolean param which defaults to True. When this is set, an exception + is raised even if there is a single operation failure. For optimal performance, + this should be set to False. + :return: An async iterator of responses, one for each blob in order + :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] + """ + reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) + + return await self._batch_send(*reqs, **options) + + @distributed_trace + async def set_premium_page_blob_tier_blobs( + self, + premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'], + *blobs: List[Union[str, BlobProperties, dict]], + **kwargs + ) -> AsyncIterator[AsyncHttpResponse]: + """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts. + + :param premium_page_blob_tier: + A page blob tier value to set on all blobs to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + + .. note:: + If you want to set different tier on different blobs please set this positional parameter to None. + Then the blob tier on every BlobProperties will be taken. + + :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys, value rules. + + blob name: + key: 'name', value type: str + premium blob tier: + key: 'blob_tier', value type: PremiumPageBlobTier + lease: + key: 'lease_id', value type: Union[str, LeaseClient] + timeout for subrequest: + key: 'timeout', value type: int + + :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword bool raise_on_any_failure: + This is a boolean param which defaults to True. When this is set, an exception + is raised even if there is a single operation failure. For optimal performance, + this should be set to False. + :return: An async iterator of responses, one for each blob in order + :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] + """ + reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) + + return await self._batch_send(*reqs, **options) + + def get_blob_client( + self, blob, # type: Union[BlobProperties, str] + snapshot=None # type: str + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param blob: + The blob with which to interact. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`~BlobClient.create_snapshot()`. + :returns: A BlobClient. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_blob_client] + :end-before: [END get_blob_client] + :language: python + :dedent: 12 + :caption: Get the blob client. 
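Tying the container-level convenience methods above together, a round-trip sketch (``container_client`` is assumed to be an aio ContainerClient; the same blob could equally be reached via get_blob_client):

    async def round_trip(container_client):
        # Upload, enumerate, download, and delete through the container client.
        await container_client.upload_blob("hello.txt", b"hello, world", overwrite=True)
        async for props in container_client.list_blobs(name_starts_with="hello"):
            print(props.name, props.size)
        downloader = await container_client.download_blob("hello.txt")
        data = await downloader.readall()
        await container_client.delete_blob("hello.txt")
        return data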
+ """ + blob_name = _get_blob_name(blob) + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( + self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_download_async.py new file mode 100644 index 0000000..c698cb4 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_download_async.py @@ -0,0 +1,491 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import sys +from io import BytesIO +from itertools import islice +import warnings + +from azure.core.exceptions import HttpResponseError +from .._shared.encryption import decrypt_blob +from .._shared.request_handlers import validate_and_format_range_headers +from .._shared.response_handlers import process_storage_error, parse_length_from_content_range +from .._deserialize import get_page_ranges_result +from .._download import process_range_and_offset, _ChunkDownloader + + +async def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + try: + content = data.response.body() + except Exception as error: + raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) + if encryption.get('key') is not None or encryption.get('resolver') is not None: + try: + return decrypt_blob( + encryption.get('required'), + encryption.get('key'), + encryption.get('resolver'), + content, + start_offset, + end_offset, + data.response.headers) + except Exception as error: + raise HttpResponseError( + message="Decryption failed.", + response=data.response, + error=error) + return content + + +class _AsyncChunkDownloader(_ChunkDownloader): + def __init__(self, **kwargs): + super(_AsyncChunkDownloader, self).__init__(**kwargs) + self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None + self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None + + async def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + await self._write_to_stream(chunk_data, chunk_start) + await self._update_progress(length) + + async def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return await self._download_chunk(chunk_start, chunk_end - 1) + + async def _update_progress(self, length): + if self.progress_lock: + async with self.progress_lock: # pylint: disable=not-async-context-manager + self.progress_total += length + else: + self.progress_total += 
length
+
+    async def _write_to_stream(self, chunk_data, chunk_start):
+        if self.stream_lock:
+            async with self.stream_lock:  # pylint: disable=not-async-context-manager
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    async def _download_chunk(self, chunk_start, chunk_end):
+        download_range, offset = process_range_and_offset(
+            chunk_start, chunk_end, chunk_end, self.encryption_options)
+
+        # No need to download the empty chunk from server if there's no data in the chunk to be downloaded.
+        # Do optimize and create empty chunk locally if condition is met.
+        if self._do_optimize(download_range[0], download_range[1]):
+            chunk_data = b"\x00" * self.chunk_size
+        else:
+            range_header, range_validation = validate_and_format_range_headers(
+                download_range[0],
+                download_range[1],
+                check_content_md5=self.validate_content
+            )
+            try:
+                _, response = await self.client.download(
+                    range=range_header,
+                    range_get_content_md5=range_validation,
+                    validate_content=self.validate_content,
+                    data_stream_total=self.total_size,
+                    download_stream_current=self.progress_total,
+                    **self.request_options
+                )
+            except HttpResponseError as error:
+                process_storage_error(error)
+
+            chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options)
+
+            # This makes sure that if_match is set so that we can validate
+            # that subsequent downloads are to an unmodified blob
+            if self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = response.properties.etag
+
+        return chunk_data
+
+
+class _AsyncChunkIterator(object):
+    """Async iterator for chunks in blob download stream."""
+
+    def __init__(self, size, content, downloader):
+        self.size = size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks = None
+        self._complete = (size == 0)
+
+    def __len__(self):
+        return self.size
+
+    def __iter__(self):
+        raise TypeError("Async stream must be iterated asynchronously.")
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        """Iterate through responses."""
+        if self._complete:
+            raise StopAsyncIteration("Download complete")
+        if not self._iter_downloader:
+            # If no iterator was supplied, the download completed with
+            # the initial GET, so we just return that data
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+        else:
+            try:
+                chunk = next(self._iter_chunks)
+            except StopIteration:
+                raise StopAsyncIteration("Download complete")
+            self._current_content = await self._iter_downloader.yield_chunk(chunk)
+
+        return self._current_content
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the blob being downloaded.
+    :ivar str container:
+        The name of the container where the blob is.
+    :ivar ~azure.storage.blob.BlobProperties properties:
+        The properties of the blob being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the blob.
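For reference, a consumption sketch for this downloader (``blob_client`` is assumed to be an aio BlobClient from this package; download_blob returns an instance of this class):

    async def save_blob(blob_client, path):
        # readinto() streams into any writable handle; the handle must be
        # seekable when max_concurrency > 1, since chunks land out of order.
        downloader = await blob_client.download_blob(max_concurrency=4)
        with open(path, "wb") as handle:
            await downloader.readinto(handle)

    async def first_chunk_size(blob_client):
        # chunks() yields the body incrementally without buffering it all.
        downloader = await blob_client.download_blob()
        async for chunk in downloader.chunks():
            return len(chunk)  # each chunk is a bytes object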
+ """ + + def __init__( + self, + clients=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + encryption_options=None, + max_concurrency=1, + name=None, + container=None, + encoding=None, + **kwargs + ): + self.name = name + self.container = container + self.properties = None + self.size = None + + self._clients = clients + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._encryption_options = encryption_options or {} + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._non_empty_ranges = None + self._response = None + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + self._first_get_size = self._config.max_single_get_size if not self._validate_content \ + else self._config.max_chunk_get_size + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, initial_request_end, self._end_range, self._encryption_options + ) + + def __len__(self): + return self.size + + async def _setup(self): + self._response = await self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.container = self.container + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = 'bytes {0}-{1}/{2}'.format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = await process_content( + self._response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + + async def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content) + + try: + location_mode, response = await self._clients.blob.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. 
+            self._location_mode = location_mode
+
+            # Parse the total file size and adjust the download size if ranges
+            # were specified
+            self._file_size = parse_length_from_content_range(response.properties.content_range)
+            if self._end_range is not None:
+                # Use the length unless it is over the end of the file
+                self.size = min(self._file_size, self._end_range - self._start_range + 1)
+            elif self._start_range is not None:
+                self.size = self._file_size - self._start_range
+            else:
+                self.size = self._file_size
+
+        except HttpResponseError as error:
+            if self._start_range is None and error.response.status_code == 416:
+                # Get range will fail on an empty file. If the user did not
+                # request a range, do a regular get request in order to get
+                # any properties.
+                try:
+                    _, response = await self._clients.blob.download(
+                        validate_content=self._validate_content,
+                        data_stream_total=0,
+                        download_stream_current=0,
+                        **self._request_options)
+                except HttpResponseError as error:
+                    process_storage_error(error)
+
+                # Set the download size to empty
+                self.size = 0
+                self._file_size = 0
+            else:
+                process_storage_error(error)
+
+        # get page ranges to optimize downloading sparse page blob
+        if response.properties.blob_type == 'PageBlob':
+            try:
+                page_ranges = await self._clients.page_blob.get_page_ranges()
+                self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
+            except HttpResponseError:
+                pass
+
+        # If the file is small, the download is complete at this point.
+        # If file size is large, download the rest of the file in chunks.
+        if response.properties.size != self.size:
+            # Lock on the etag. This can be overridden by the user by specifying '*'
+            if self._request_options.get('modified_access_conditions'):
+                if not self._request_options['modified_access_conditions'].if_match:
+                    self._request_options['modified_access_conditions'].if_match = response.properties.etag
+        else:
+            self._download_complete = True
+        return response
+
+    def chunks(self):
+        """Iterate over chunks in the download stream.
+
+        :rtype: AsyncIterator[bytes]
+        """
+        if self.size == 0 or self._download_complete:
+            iter_downloader = None
+        else:
+            data_end = self._file_size
+            if self._end_range is not None:
+                # Use the length unless it is over the end of the file
+                data_end = min(self._file_size, self._end_range + 1)
+            iter_downloader = _AsyncChunkDownloader(
+                client=self._clients.blob,
+                non_empty_ranges=self._non_empty_ranges,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._first_get_size,
+                start_range=self._initial_range[1] + 1,  # Start where the first download ended
+                end_range=data_end,
+                stream=None,
+                parallel=False,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                use_location=self._location_mode,
+                **self._request_options)
+        return _AsyncChunkIterator(
+            size=self.size,
+            content=self._current_content,
+            downloader=iter_downloader)
+
+    async def readall(self):
+        """Download the contents of this blob.
+
+        This operation is blocking until all data is downloaded.
+        :rtype: bytes or str
+        """
+        stream = BytesIO()
+        await self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)
+        return data
+
+    async def content_as_bytes(self, max_concurrency=1):
+        """Download the contents of this blob.
+
+        This operation is blocking until all data is downloaded.
+
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        return await self.readall()
+
+    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """Download the contents of this blob, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding to decode the downloaded bytes. Default is UTF-8.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return await self.readall()
+
+    async def readinto(self, stream):
+        """Download the contents of this blob to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        # the stream must be seekable if parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
+            if sys.version_info >= (3,) and not stream.seekable():
+                raise ValueError(error_message)
+
+            try:
+                stream.seek(stream.tell())
+            except (NotImplementedError, AttributeError):
+                raise ValueError(error_message)
+
+        # Write the content to the user stream
+        stream.write(self._current_content)
+        if self._download_complete:
+            return self.size
+
+        data_end = self._file_size
+        if self._end_range is not None:
+            # Use the length unless it is over the end of the file
+            data_end = min(self._file_size, self._end_range + 1)
+
+        downloader = _AsyncChunkDownloader(
+            client=self._clients.blob,
+            non_empty_ranges=self._non_empty_ranges,
+            total_size=self.size,
+            chunk_size=self._config.max_chunk_get_size,
+            current_progress=self._first_get_size,
+            start_range=self._initial_range[1] + 1,  # start where the first download ended
+            end_range=data_end,
+            stream=stream,
+            parallel=parallel,
+            validate_content=self._validate_content,
+            encryption_options=self._encryption_options,
+            use_location=self._location_mode,
+            **self._request_options)
+
+        dl_tasks = downloader.get_chunk_offsets()
+        running_futures = [
+            asyncio.ensure_future(downloader.process_chunk(d))
+            for d in islice(dl_tasks, 0, self._max_concurrency)
+        ]
+        while running_futures:
+            # Wait for some download to finish before adding a new one
+            _done, running_futures = await asyncio.wait(
+                running_futures, return_when=asyncio.FIRST_COMPLETED)
+            try:
+                next_chunk = next(dl_tasks)
+            except StopIteration:
+                break
+            else:
+                running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
+
+        if running_futures:
+            # Wait for the remaining downloads to finish
+            await asyncio.wait(running_futures)
+        return self.size
+
+    async def download_to_stream(self, stream, max_concurrency=1):
+        """Download the contents of this blob to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The properties of the downloaded blob.
+ :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + await self.readinto(stream) + return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_lease_async.py new file mode 100644 index 0000000..91bf93d --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_lease_async.py @@ -0,0 +1,327 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TypeVar, TYPE_CHECKING +) + +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._generated.models import ( + StorageErrorException, + LeaseAccessConditions) +from .._serialize import get_modify_conditions +from .._lease import BlobLeaseClient as LeaseClientBase + +if TYPE_CHECKING: + from datetime import datetime + from .._generated.operations import BlobOperations, ContainerOperations + BlobClient = TypeVar("BlobClient") + ContainerClient = TypeVar("ContainerClient") + + +class BlobLeaseClient(LeaseClientBase): + """Creates a new BlobLeaseClient. + + This client provides lease operations on a BlobClient or ContainerClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the blob or container to lease. + :type client: ~azure.storage.blob.aio.BlobClient or + ~azure.storage.blob.aio.ContainerClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. + """ + + def __enter__(self): + raise TypeError("Async lease must use 'async with'.") + + def __exit__(self, *args): + self.release() + + async def __aenter__(self): + return self + + async def __aexit__(self, *args): + await self.release() + + @distributed_trace_async + async def acquire(self, lease_duration=-1, **kwargs): + # type: (int, Any) -> None + """Requests a new lease. + + If the container does not have an active lease, the Blob service creates a + lease on the container and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace_async + async def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the container or blob. Note that + the lease may be renewed even if it has expired as long as the container + or blob has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the container or blob. Releasing the lease allows another client + to immediately acquire the lease for the container or blob as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. 
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.change_lease( + lease_id=self.id, + proposed_lease_id=proposed_lease_id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def break_lease(self, lease_break_period=None, **kwargs): + # type: (Optional[int], Any) -> int + """Break the lease, if the container or blob has an active lease. + + Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the container or blob. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :param int lease_break_period: + This is the proposed duration of seconds that the lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the lease. If longer, the time remaining on the lease is used. + A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. 
+ :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. "\"tagname\"='my tag'" + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.break_lease( + timeout=kwargs.pop('timeout', None), + break_period=lease_break_period, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_list_blobs_helper.py new file mode 100644 index 0000000..dc09846 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_list_blobs_helper.py @@ -0,0 +1,162 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged +from .._deserialize import get_blob_properties_from_generated_code +from .._models import BlobProperties +from .._generated.models import StorageErrorException, BlobItemInternal, BlobPrefix as GenBlobPrefix +from .._shared.models import DictMixin +from .._shared.response_handlers import return_context_and_deserialized, process_storage_error + + +class BlobPropertiesPaged(AsyncPageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The container that the blobs are listed from. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
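+
+    Illustrative usage sketch (hypothetical ``container_client``; this pager is
+    normally produced by ``aio.ContainerClient.list_blobs`` rather than
+    constructed directly)::
+
+        async for blob in container_client.list_blobs(name_starts_with="logs/"):
+            print(blob.name)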
+ """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(BlobPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobProperties): + return item + if isinstance(item, BlobItemInternal): + blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access + blob.container = self.container + return blob + return item + + +class BlobPrefix(AsyncItemPaged, DictMixin): + """An Iterable of Blob properties. + + Returned from walk_blobs when a delimiter is used. + Can be thought of as a virtual blob directory. + + :ivar str name: The prefix, or "directory name" of the blob. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str marker: The continuation token of the current page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str marker: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
+ """ + def __init__(self, *args, **kwargs): + super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) + self.name = kwargs.get('prefix') + self.prefix = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.container = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') + + +class BlobPrefixPaged(BlobPropertiesPaged): + def __init__(self, *args, **kwargs): + super(BlobPrefixPaged, self).__init__(*args, **kwargs) + self.name = self.prefix + + async def _extract_data_cb(self, get_next_return): + continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return continuation_token, self.current_page + + def _build_item(self, item): + item = super(BlobPrefixPaged, self)._build_item(item) + if isinstance(item, GenBlobPrefix): + return BlobPrefix( + self._command, + container=self.container, + prefix=item.name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_models.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_models.py new file mode 100644 index 0000000..44d5d63 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_models.py @@ -0,0 +1,141 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from azure.core.async_paging import AsyncPageIterator + +from .._models import ContainerProperties, FilteredBlob +from .._shared.response_handlers import return_context_and_deserialized, process_storage_error + +from .._generated.models import StorageErrorException +from .._generated.models import FilterBlobItem + + +class ContainerPropertiesPaged(AsyncPageIterator): + """An Iterable of Container properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A container name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only containers whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of container names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(ContainerPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [self._build_item(item) for item in self._response.container_items] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + return ContainerProperties._from_generated(item) # pylint: disable=protected-access + + +class FilteredBlobPaged(AsyncPageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
+ """ + def __init__( + self, command, + container=None, + results_per_page=None, + continuation_token=None, + location_mode=None): + super(FilteredBlobPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.marker = continuation_token + self.results_per_page = results_per_page + self.container = container + self.current_page = None + self.location_mode = location_mode + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.marker = self._response.next_marker + self.current_page = [self._build_item(item) for item in self._response.blobs] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, FilterBlobItem): + blob = FilteredBlob(name=item.name, container_name=item.container_name, tag_value=item.tag_value) # pylint: disable=protected-access + return blob + return item diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_upload_helpers.py new file mode 100644 index 0000000..3a495b5 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_upload_helpers.py @@ -0,0 +1,266 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from io import SEEK_SET, UnsupportedOperation +from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import + +import six +from azure.core.exceptions import ResourceModifiedError + +from .._shared.response_handlers import ( + process_storage_error, + return_response_headers) +from .._shared.uploads_async import ( + upload_data_chunks, + upload_substream_blocks, + BlockBlobChunkUploader, + PageBlobChunkUploader, + AppendBlobChunkUploader) +from .._shared.encryption import generate_blob_encryption_data, encrypt_blob +from .._generated.models import ( + StorageErrorException, + BlockLookupList, + AppendPositionAccessConditions, + ModifiedAccessConditions, +) +from .._upload_helpers import _convert_mod_error, _any_conditions + +if TYPE_CHECKING: + from datetime import datetime # pylint: disable=unused-import + BlobLeaseClient = TypeVar("BlobLeaseClient") + + +async def upload_block_blob( # pylint: disable=too-many-locals + client=None, + data=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + adjusted_count = length + if (encryption_options.get('key') is not None) and (adjusted_count is not None): + adjusted_count += (16 - (length % 16)) + blob_headers = kwargs.pop('blob_headers', None) + tier = kwargs.pop('standard_blob_tier', None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + # Do single put if the size is smaller than config.max_single_put_size + if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): + try: + data = data.read(length) + if not isinstance(data, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + except AttributeError: + pass + if encryption_options.get('key'): + encryption_data, data = encrypt_blob(data, encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + return await client.upload( + data, + content_length=adjusted_count, + blob_http_headers=blob_headers, + headers=headers, + cls=return_response_headers, + validate_content=validate_content, + data_stream_total=adjusted_count, + upload_stream_current=0, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + **kwargs) + + use_original_upload_path = blob_settings.use_byte_buffer or \ + validate_content or encryption_options.get('required') or \ + blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + if encryption_options.get('key'): + cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + encryption_options['cek'] = cek + encryption_options['vector'] = iv + block_ids = await upload_data_chunks( + service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + encryption_options=encryption_options, + **kwargs + ) + else: + block_ids = await upload_substream_blocks( + service=client, + 
uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + **kwargs + ) + + block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) + block_lookup.latest = block_ids + return await client.commit_block_list( + block_lookup, + blob_http_headers=blob_headers, + cls=return_response_headers, + validate_content=validate_content, + headers=headers, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + **kwargs) + except StorageErrorException as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +async def upload_page_blob( + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + if length is None or length < 0: + raise ValueError("A content length must be specified for a Page Blob.") + if length % 512 != 0: + raise ValueError("Invalid page blob size: {0}. " + "The size must be aligned to a 512-byte boundary.".format(length)) + if kwargs.get('premium_page_blob_tier'): + premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') + try: + headers['x-ms-access-tier'] = premium_page_blob_tier.value + except AttributeError: + headers['x-ms-access-tier'] = premium_page_blob_tier + if encryption_options and encryption_options.get('data'): + headers['x-ms-meta-encryptiondata'] = encryption_options['data'] + blob_tags_string = kwargs.pop('blob_tags_string', None) + + response = await client.create( + content_length=0, + blob_content_length=length, + blob_sequence_number=None, + blob_http_headers=kwargs.pop('blob_headers', None), + blob_tags_string=blob_tags_string, + cls=return_response_headers, + headers=headers, + **kwargs) + if length == 0: + return response + + kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) + return await upload_data_chunks( + service=client, + uploader_class=PageBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_page_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + encryption_options=encryption_options, + **kwargs) + + except StorageErrorException as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +async def upload_append_blob( # pylint: disable=unused-argument + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if length == 0: + return {} + blob_headers = kwargs.pop('blob_headers', None) + append_conditions = AppendPositionAccessConditions( + max_size=kwargs.pop('maxsize_condition', None), + append_position=None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + try: + if overwrite: + await client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return await upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + 
stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + **kwargs) + except StorageErrorException as error: + if error.response.status_code != 404: + raise + # rewind the request body if it is a stream + if hasattr(stream, 'read'): + try: + # attempt to rewind the body to the initial position + stream.seek(0, SEEK_SET) + except UnsupportedOperation: + # if body is not seekable, then retry would not work + raise error + await client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return await upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/__init__.py new file mode 100644 index 0000000..0cfdefd --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/__init__.py @@ -0,0 +1,93 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from ._download import StorageStreamDownloader +from ._data_lake_file_client import DataLakeFileClient +from ._data_lake_directory_client import DataLakeDirectoryClient +from ._file_system_client import FileSystemClient +from ._data_lake_service_client import DataLakeServiceClient +from ._data_lake_lease import DataLakeLeaseClient +from ._models import ( + LocationMode, + ResourceTypes, + FileSystemProperties, + FileSystemPropertiesPaged, + DirectoryProperties, + FileProperties, + PathProperties, + LeaseProperties, + ContentSettings, + AccountSasPermissions, + FileSystemSasPermissions, + DirectorySasPermissions, + FileSasPermissions, + UserDelegationKey, + PublicAccess, + AccessPolicy, + DelimitedTextDialect, + DelimitedJsonDialect, + ArrowDialect, + ArrowType, + DataLakeFileQueryError, + DataLakeAclChangeFailedError, + AccessControlChangeResult, + AccessControlChangeCounters, + AccessControlChangeFailure, + AccessControlChanges, +) + +from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \ + generate_file_sas + +from ._shared.policies import ExponentialRetry, LinearRetry +from ._shared.models import StorageErrorCode +from ._version import VERSION + +__version__ = VERSION + +__all__ = [ + 'DataLakeServiceClient', + 'FileSystemClient', + 'DataLakeFileClient', + 'DataLakeDirectoryClient', + 'DataLakeLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'LocationMode', + 'PublicAccess', + 'AccessPolicy', + 'ResourceTypes', + 'StorageErrorCode', + 'UserDelegationKey', + 'FileSystemProperties', + 'FileSystemPropertiesPaged', + 'DirectoryProperties', + 'FileProperties', + 'PathProperties', + 'LeaseProperties', + 'ContentSettings', + 'AccessControlChangeResult', + 'AccessControlChangeCounters', + 'AccessControlChangeFailure', + 'AccessControlChanges', + 'AccountSasPermissions', + 'FileSystemSasPermissions', + 
+    'DirectorySasPermissions',
+    'FileSasPermissions',
+    'generate_account_sas',
+    'generate_file_system_sas',
+    'generate_directory_sas',
+    'generate_file_sas',
+    'VERSION',
+    'StorageStreamDownloader',
+    'DelimitedTextDialect',
+    'DelimitedJsonDialect',
+    'DataLakeFileQueryError',
+    'DataLakeAclChangeFailedError',
+    'ArrowDialect',
+    'ArrowType'
+]
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_directory_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_directory_client.py
new file mode 100644
index 0000000..c1c4ad8
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_directory_client.py
@@ -0,0 +1,539 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+try:
+    from urllib.parse import quote, unquote
+except ImportError:
+    from urllib2 import quote, unquote  # type: ignore
+from ._deserialize import deserialize_dir_properties
+from ._shared.base_client import parse_connection_str
+from ._data_lake_file_client import DataLakeFileClient
+from ._models import DirectoryProperties
+from ._path_client import PathClient
+
+
+class DataLakeDirectoryClient(PathClient):
+    """A client to interact with the DataLake directory, even if the directory may not yet exist.
+
+    For operations relating to a specific subdirectory or file under the directory, a directory client or file client
+    can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param directory_name:
+        The whole path of the directory. eg. {directory under file system}/{directory to interact with}
+    :type directory_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+            :start-after: [START instantiate_directory_client_from_conn_str]
+            :end-before: [END instantiate_directory_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeDirectoryClient from connection string.
+    """
+    def __init__(
+        self, account_url,  # type: str
+        file_system_name,  # type: str
+        directory_name,  # type: str
+        credential=None,  # type: Optional[Any]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name,
+                                                      credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            file_system_name,  # type: str
+            directory_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> DataLakeDirectoryClient
+        """
+        Create DataLakeDirectoryClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name:
+            The name of file system to interact with.
+        :type file_system_name: str
+        :param directory_name:
+            The name of directory to interact with. The directory is under file system.
+        :type directory_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :return: a DataLakeDirectoryClient
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, directory_name=directory_name,
+            credential=credential, **kwargs)
+
+    def create_directory(self, metadata=None,  # type: Optional[Dict[str, str]]
+                         **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new directory.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: response dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 8 + :caption: Create directory. + """ + return self._create('directory', metadata=metadata, **kwargs) + + def delete_directory(self, **kwargs): + # type: (...) -> None + """ + Marks the specified directory for deletion. + + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory.py + :start-after: [START delete_directory] + :end-before: [END delete_directory] + :language: python + :dedent: 4 + :caption: Delete directory. + """ + return self._delete(**kwargs) + + def get_directory_properties(self, **kwargs): + # type: (**Any) -> DirectoryProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the directory. It does not return the content of the directory. + + :keyword lease: + Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: DirectoryProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START get_directory_properties]
+                :end-before: [END get_directory_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file/directory.
+        """
+        return self._get_path_properties(cls=deserialize_dir_properties, **kwargs)  # pylint: disable=protected-access
+
+    def rename_directory(self, new_name,  # type: str
+                         **kwargs):
+        # type: (**Any) -> DataLakeDirectoryClient
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new directory name the user wants to rename to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory.py + :start-after: [START rename_directory] + :end-before: [END rename_directory] + :language: python + :dedent: 4 + :caption: Rename the source directory. + """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') + new_path = new_path_and_token[0] + try: + new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') + except IndexError: + if not self._raw_credential and new_file_system != self.file_system_name: + raise ValueError("please provide the sas token for the new file") + if not self._raw_credential and new_file_system == self.file_system_name: + new_dir_sas = self._query_str.strip('?') + + new_directory_client = DataLakeDirectoryClient( + "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, + credential=self._raw_credential or new_dir_sas, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + new_directory_client._rename_path( # pylint: disable=protected-access + '/{}/{}{}'.format(quote(unquote(self.file_system_name)), + quote(unquote(self.path_name)), + self._query_str), + **kwargs) + return new_directory_client + + def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Create a subdirectory and return the subdirectory client to be interacted with. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. 
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient for the subdirectory.
+ """
+ subdir = self.get_sub_directory_client(sub_directory)
+ subdir.create_directory(metadata=metadata, **kwargs)
+ return subdir
+
+ def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeDirectoryClient
+ """
+ Marks the specified subdirectory for deletion.
+
+ :param sub_directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :keyword lease:
+ Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient for the subdirectory.
+ """
+ subdir = self.get_sub_directory_client(sub_directory)
+ subdir.delete_directory(**kwargs)
+ return subdir
+
+ def create_file(self, file, # type: Union[FileProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeFileClient
+ """
+ Create a new file and return the file client to be interacted with.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties.
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword metadata:
+ Name-value pairs associated with the file as metadata.
+ :paramtype metadata: dict(str, str)
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeFileClient
+ """
+ file_client = self.get_file_client(file)
+ file_client.create_file(**kwargs)
+ return file_client
+
+ def get_file_client(self, file # type: Union[FileProperties, str]
+ ):
+ # type: (...) -> DataLakeFileClient
+ """Get a client to interact with the specified file.
+
+ The file need not already exist.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties. e.g. directory/subdirectory/file
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :returns: A DataLakeFileClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+ """
+ try:
+ file_path = file.name
+ except AttributeError:
+ file_path = self.path_name + '/' + file
+
+ return DataLakeFileClient(
+ self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str]
+ ):
+ # type: (...) -> DataLakeDirectoryClient
+ """Get a client to interact with the specified subdirectory of the current directory.
+
+ The subdirectory need not already exist.
+
+ :param sub_directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+ """
+ try:
+ subdir_path = sub_directory.name
+ except AttributeError:
+ subdir_path = self.path_name + '/' + sub_directory
+
+ return DataLakeDirectoryClient(
+ self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_file_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_file_client.py
new file mode 100644
index 0000000..7effeb3
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_file_client.py
@@ -0,0 +1,753 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from io import BytesIO
+
+try:
+ from urllib.parse import quote, unquote
+except ImportError:
+ from urllib2 import quote, unquote # type: ignore
+
+import six
+
+from ._quick_query_helper import DataLakeFileQueryReader
+from ._shared.base_client import parse_connection_str
+from ._shared.request_handlers import get_length, read_length
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import IterStreamer
+from ._upload_helper import upload_datalake_file
+from ._generated.models import StorageErrorException
+from ._download import StorageStreamDownloader
+from ._path_client import PathClient
+from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers, \
+ convert_datetime_to_rfc1123
+from ._deserialize import process_storage_error, deserialize_file_properties
+from ._models import FileProperties, DataLakeFileQueryError
+
+
+class DataLakeFileClient(PathClient):
+ """A client to interact with the DataLake file, even if the file may not yet exist.
+
+ :ivar str url:
+ The full endpoint URL to the file system, including SAS token if used.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URI to the storage account.
+ :param file_system_name:
+ The file system for the directory or files.
+ :type file_system_name: str
+ :param file_path:
+ The whole file path, used to interact with a specific file.
+ e.g. "{directory}/{subdirectory}/{file}"
+ :type file_path: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+ :start-after: [START instantiate_file_client_from_conn_str]
+ :end-before: [END instantiate_file_client_from_conn_str]
+ :language: python
+ :dedent: 4
+ :caption: Creating the DataLakeFileClient from connection string.
+ """
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ file_path, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
+ credential=credential, **kwargs)
+
+ @classmethod
+ def from_connection_string(
+ cls, conn_str, # type: str
+ file_system_name, # type: str
+ file_path, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ): # type: (...) -> DataLakeFileClient
+ """
+ Create DataLakeFileClient from a Connection String.
+
+ :param str conn_str:
+ A connection string to an Azure Storage account.
+ :param file_system_name: The name of file system to interact with.
+ :type file_system_name: str
+ :param file_path: The whole file path, used to interact with a specific file.
+ e.g. "{directory}/{subdirectory}/{file}"
+ :type file_path: str
+ :param credential:
+ The credentials with which to authenticate.
+ This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+ :return: a DataLakeFileClient
+ :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+ """
+ account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+ return cls(
+ account_url, file_system_name=file_system_name, file_path=file_path,
+ credential=credential, **kwargs)
+
+ def create_file(self, content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Create a new file.
+
+ :param ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :param metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: response dict (Etag and last modified).
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START create_file]
+ :end-before: [END create_file]
+ :language: python
+ :dedent: 4
+ :caption: Create file.
+ """
+ return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
+
+ def delete_file(self, **kwargs):
+ # type: (...) -> None
+ """
+ Marks the specified file for deletion.
+
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START delete_file]
+ :end-before: [END delete_file]
+ :language: python
+ :dedent: 4
+ :caption: Delete file.
+ """
+ return self._delete(**kwargs)
+
+ def get_file_properties(self, **kwargs):
+ # type: (**Any) -> FileProperties
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the file. It does not return the content of the file.
+
+ :keyword lease:
+ Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: FileProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START get_file_properties]
+ :end-before: [END get_file_properties]
+ :language: python
+ :dedent: 4
+ :caption: Getting the properties for a file.
+ """
+ return self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access
+
+ def set_file_expiry(self, expiry_options, # type: str
+ expires_on=None, # type: Optional[Union[datetime, int]]
+ **kwargs):
+ # type: (str, Optional[Union[datetime, int]], **Any) -> None
+ """Sets the time a file will expire and be deleted.
+
+ :param str expiry_options:
+ Required. Indicates mode of the expiry time.
+ Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute'
+ :param datetime or int expires_on:
+ The time to set the file to expire.
+ When expiry_options is RelativeTo*, expires_on should be an int in milliseconds.
+ If the type of expires_on is datetime, it should be in UTC time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ try:
+ expires_on = convert_datetime_to_rfc1123(expires_on)
+ except AttributeError:
+ expires_on = str(expires_on)
+ self._datalake_client_for_blob_operation.path \
+ .set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access
+
+ def _upload_options( # pylint:disable=too-many-statements
+ self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+
+ encoding = kwargs.pop('encoding', 'UTF-8')
+ if isinstance(data, six.text_type):
+ data = data.encode(encoding) # type: ignore
+ if length is None:
+ length = get_length(data)
+ if isinstance(data, bytes):
+ data = data[:length]
+
+ if isinstance(data, bytes):
+ stream = BytesIO(data)
+ elif hasattr(data, 'read'):
+ stream = data
+ elif hasattr(data, '__iter__'):
+ stream = IterStreamer(data, encoding=encoding)
+ else:
+ raise TypeError("Unsupported data type: {}".format(type(data)))
+
+ validate_content = kwargs.pop('validate_content', False)
+ content_settings = kwargs.pop('content_settings', None)
+ metadata = kwargs.pop('metadata', None)
+ max_concurrency = kwargs.pop('max_concurrency', 1)
+
+ kwargs['properties'] = add_metadata_headers(metadata)
+ kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
+ kwargs['modified_access_conditions'] = get_mod_conditions(kwargs)
+
+ if content_settings:
+ kwargs['path_http_headers'] = get_path_http_headers(content_settings)
+
+ kwargs['stream'] = stream
+ kwargs['length'] = length
+ kwargs['validate_content'] = validate_content
+ kwargs['max_concurrency'] = max_concurrency
+ kwargs['client'] = self._client.path
+
+ return kwargs
+
+ def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ overwrite=False, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """
+ Upload data to a file.
+
+ :param data: Content to be uploaded to the file.
+ :param int length: Size of the data in bytes.
+ :param bool overwrite: Whether to overwrite an existing file.
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword metadata:
+ Name-value pairs associated with the file as metadata.
+ :paramtype metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword int chunk_size: + The maximum chunk size for uploading a file in chunks. + Defaults to 100*1024*1024, or 100MB. + :return: response dict (Etag and last modified). 
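+
+ .. admonition:: Example:
+
+ A minimal inline sketch; the ``file_client`` name and the local path
+ are illustrative only, not a shipped sample.
+
+ .. code-block:: python
+
+ # Hypothetical usage: upload local bytes, replacing any existing file.
+ with open("./sample.txt", "rb") as data:
+ file_client.upload_data(data, overwrite=True)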
+ """ + options = self._upload_options( + data, + length=length, + overwrite=overwrite, + **kwargs) + return upload_datalake_file(**options) + + @staticmethod + def _append_data_options(data, offset, length=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] + + if isinstance(data, six.text_type): + data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore + if length is None: + length = get_length(data) + if length is None: + length, data = read_length(data) + if isinstance(data, bytes): + data = data[:length] + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + options = { + 'body': data, + 'position': offset, + 'content_length': length, + 'lease_access_conditions': access_conditions, + 'validate_content': kwargs.pop('validate_content', False), + 'timeout': kwargs.pop('timeout', None), + 'cls': return_response_headers} + options.update(kwargs) + return options + + def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + offset, # type: int + length=None, # type: Optional[int] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Append data to the file. + + :param data: Content to be appended to file + :param offset: start position of the data to be appended to. + :param length: Size of the data in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this MD5 hash is not stored with the + file. + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :return: dict of the response header + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download.py + :start-after: [START append_data] + :end-before: [END append_data] + :language: python + :dedent: 4 + :caption: Append data to the file. + """ + options = self._append_data_options( + data, + offset, + length=length, + **kwargs) + try: + return self._client.path.append_data(**options) + except StorageErrorException as error: + process_storage_error(error) + + @staticmethod + def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_mod_conditions(kwargs) + + path_http_headers = None + if content_settings: + path_http_headers = get_path_http_headers(content_settings) + + options = { + 'position': offset, + 'content_length': 0, + 'path_http_headers': path_http_headers, + 'retain_uncommitted_data': retain_uncommitted_data, + 'close': kwargs.pop('close', False), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_response_headers} + options.update(kwargs) + return options + + def flush_data(self, offset, # type: int + retain_uncommitted_data=False, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ Commit the previous appended data. 
+
+ :param offset: offset is equal to the length of the file after committing the
+ previously appended data.
+ :param bool retain_uncommitted_data: Valid only for flush operations. If
+ "true", uncommitted data is retained after the flush operation
+ completes; otherwise, the uncommitted data is deleted after the flush
+ operation. The default is false. Data at offsets less than the
+ specified position are written to the file when flush succeeds, but
+ this optional parameter allows data after the flush position to be
+ retained for a future flush operation.
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword bool close: Azure Storage Events allow applications to receive
+ notifications when files change. When Azure Storage Events are
+ enabled, a file changed event is raised. This event has a property
+ indicating whether this is the final change to distinguish the
+ difference between an intermediate flush to a file stream and the
+ final close of a file stream. The close query parameter is valid only
+ when the action is "flush" and change notifications are enabled. If
+ the value of close is "true" and the flush operation completes
+ successfully, the service raises a file change notification with a
+ property indicating that this is the final update (the file stream has
+ been closed). If "false" a change notification is raised indicating
+ the file has changed. The default is false. This query parameter is
+ set to true by the Hadoop ABFS driver to indicate that the file stream
+ has been closed.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :return: response headers in dict
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START upload_file_to_file_system]
+ :end-before: [END upload_file_to_file_system]
+ :language: python
+ :dedent: 8
+ :caption: Commit the previously appended data.
+ """
+ options = self._flush_data_options(
+ offset,
+ retain_uncommitted_data=retain_uncommitted_data, **kwargs)
+ try:
+ return self._client.path.flush_data(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def download_file(self, offset=None, length=None, **kwargs):
+ # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+ """Downloads a file to the StorageStreamDownloader. The readall() method must
+ be used to read all the content, or readinto() must be used to download the file into
+ a stream.
+
+ :param int offset:
+ Start of byte range to use for downloading a section of the file.
+ Must be set if length is provided.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword lease:
+ If specified, download only succeeds if the file's lease is active
+ and matches this ID. Required if the file has an active lease.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: A streaming object (StorageStreamDownloader)
+ :rtype: ~azure.storage.filedatalake.StorageStreamDownloader
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START read_file]
+ :end-before: [END read_file]
+ :language: python
+ :dedent: 4
+ :caption: Return the downloaded data.
+ """
+ downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+ return StorageStreamDownloader(downloader)
+
+ def rename_file(self, new_name, # type: str
+ **kwargs):
+ # type: (**Any) -> DataLakeFileClient
+ """
+ Rename the source file.
+
+ :param str new_name: The new file name the user wants to rename to.
+ The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword source_lease: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `source_match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: the renamed file client
+ :rtype: DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START rename_file]
+ :end-before: [END rename_file]
+ :language: python
+ :dedent: 4
+ :caption: Rename the source file.
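+
+ A minimal inline sketch of the expected ``new_name`` format; the file
+ system and path names here are illustrative only.
+
+ .. code-block:: python
+
+ # The new name must be prefixed with the destination file system name.
+ renamed_client = file_client.rename_file("myfilesystem/newdir/newfile.txt")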
+ """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') + new_path = new_path_and_token[0] + try: + new_file_sas = new_path_and_token[1] or self._query_str.strip('?') + except IndexError: + if not self._raw_credential and new_file_system != self.file_system_name: + raise ValueError("please provide the sas token for the new file") + if not self._raw_credential and new_file_system == self.file_system_name: + new_file_sas = self._query_str.strip('?') + + new_file_client = DataLakeFileClient( + "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, + credential=self._raw_credential or new_file_sas, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function + ) + new_file_client._rename_path( # pylint: disable=protected-access + '/{}/{}{}'.format(quote(unquote(self.file_system_name)), + quote(unquote(self.path_name)), + self._query_str), + **kwargs) + return new_file_client + + def query_file(self, query_expression, **kwargs): + # type: (str, **Any) -> DataLakeFileQueryReader + """ + Enables users to select/project on datalake file data by providing simple query expressions. + This operations returns a DataLakeFileQueryReader, users need to use readall() or readinto() to get query data. + + :param str query_expression: + Required. a query statement. + eg. Select * from DataLakeStorage + :keyword Callable[~azure.storage.filedatalake.DataLakeFileQueryError] on_error: + A function to be called on any processing errors returned by the service. + :keyword file_format: + Optional. Defines the serialization of the data currently stored in the file. The default is to + treat the file data as CSV data formatted in the default dialect. This can be overridden with + a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect. + :paramtype file_format: + ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect + :keyword output_format: + Optional. Defines the output serialization for the data stream. By default the data will be returned + as it is represented in the file. By providing an output format, the file data will be reformatted + according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect. + :paramtype output_format: + ~azure.storage.filedatalake.DelimitedTextDialect, ~azure.storage.filedatalake.DelimitedJsonDialect + or list[~azure.storage.filedatalake.ArrowDialect] + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A streaming object (DataLakeFileQueryReader) + :rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_query.py + :start-after: [START query] + :end-before: [END query] + :language: python + :dedent: 4 + :caption: select/project on datalake file data by providing simple query expressions. + """ + query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage") + blob_quick_query_reader = self._blob_client.query_blob(query_expression, + blob_format=kwargs.pop('file_format', None), + error_cls=DataLakeFileQueryError, + **kwargs) + return DataLakeFileQueryReader(blob_quick_query_reader) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_lease.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_lease.py new file mode 100644 index 0000000..3643601 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_lease.py @@ -0,0 +1,245 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import uuid + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, + TypeVar, TYPE_CHECKING +) +from azure.multiapi.storagev2.blob.v2019_07_07 import BlobLeaseClient + + +if TYPE_CHECKING: + from datetime import datetime + FileSystemClient = TypeVar("FileSystemClient") + DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") + DataLakeFileClient = TypeVar("DataLakeFileClient") + + +class DataLakeLeaseClient(object): + """Creates a new DataLakeLeaseClient. + + This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the file system, directory, or file to lease. + :type client: ~azure.storage.filedatalake.FileSystemClient or + ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. 
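+
+ .. admonition:: Example:
+
+ A minimal sketch of the lease lifecycle; ``file_client`` is an
+ illustrative placeholder for any FileSystemClient, DataLakeDirectoryClient,
+ or DataLakeFileClient instance.
+
+ .. code-block:: python
+
+ # Acquire a 15-second lease, then release it when done.
+ lease = DataLakeLeaseClient(file_client)
+ lease.acquire(lease_duration=15)
+ try:
+ ... # operations passing lease=lease go here
+ finally:
+ lease.release()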
+ """ + def __init__( + self, client, lease_id=None + ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs + # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None + self.id = lease_id or str(uuid.uuid4()) + self.last_modified = None + self.etag = None + + if hasattr(client, '_blob_client'): + _client = client._blob_client # type: ignore # pylint: disable=protected-access + elif hasattr(client, '_container_client'): + _client = client._container_client # type: ignore # pylint: disable=protected-access + else: + raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") + + self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.release() + + def acquire(self, lease_duration=-1, **kwargs): + # type: (int, Optional[int], **Any) -> None + """Requests a new lease. + + If the file/file system does not have an active lease, the DataLake service creates a + lease on the file/file system and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) + self._update_lease_client_attributes() + + def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the file system or file. Note that + the lease may be renewed even if it has expired as long as the file system + or file has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + self._blob_lease_client.renew(**kwargs) + self._update_lease_client_attributes() + + def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the file system or file. Releasing the lease allows another client + to immediately acquire the lease for the file system or file as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + self._blob_lease_client.release(**kwargs) + self._update_lease_client_attributes() + + def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. 
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs)
+ self._update_lease_client_attributes()
+
+ def break_lease(self, lease_break_period=None, **kwargs):
+ # type: (Optional[int], Any) -> int
+ """Break the lease, if the file system or file has an active lease.
+
+ Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+ the request is not required to specify a matching lease ID. When a lease
+ is broken, the lease break period is allowed to elapse, during which time
+ no lease operation except break and release can be performed on the file system or file.
+ When a lease is successfully broken, the response indicates the interval
+ in seconds until a new lease can be acquired.
+
+ :param int lease_break_period:
+ This is the proposed duration, in seconds, that the lease
+ should continue before it is broken, between 0 and 60 seconds. This
+ break period is only used if it is shorter than the time remaining
+ on the lease. If longer, the time remaining on the lease is used.
+ A new lease will not be available before the break period has
+ expired, but the lease may be held for longer than the break
+ period. If this header does not appear with a break
+ operation, a fixed-duration lease breaks after the remaining lease
+ period elapses, and an infinite lease breaks immediately.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Approximate time remaining in the lease period, in seconds.
+ :rtype: int
+ """
+ return self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
+
+ def _update_lease_client_attributes(self):
+ self.id = self._blob_lease_client.id # type: str
+ self.last_modified = self._blob_lease_client.last_modified # type: datetime
+ self.etag = self._blob_lease_client.etag # type: str
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_service_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_service_client.py
new file mode 100644
index 0000000..d2bccd6
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_service_client.py
@@ -0,0 +1,421 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse # type: ignore
+
+from azure.core.paging import ItemPaged
+
+from azure.multiapi.storagev2.blob.v2019_07_07 import BlobServiceClient
+from ._shared.base_client import StorageAccountHostsMixin, parse_query, parse_connection_str
+from ._file_system_client import FileSystemClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._data_lake_file_client import DataLakeFileClient
+from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode
+from ._serialize import convert_dfs_url_to_blob_url
+
+
+class DataLakeServiceClient(StorageAccountHostsMixin):
+ """A client to interact with the DataLake Service at the account level.
+
+ This client provides operations to retrieve and configure the account properties
+ as well as list, create and delete file systems within the account.
+ For operations relating to a specific file system, directory or file, clients for those entities
+ can also be retrieved using the `get_client` functions.
+
+ :ivar str url:
+ The full endpoint URL to the datalake service endpoint.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URL to the DataLake storage account. Any other entities included
+ in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
+ authenticated with a SAS token.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START create_datalake_service_client]
+ :end-before: [END create_datalake_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Creating the DataLakeServiceClient from connection string.
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START create_datalake_service_client_oauth]
+ :end-before: [END create_datalake_service_client_oauth]
+ :language: python
+ :dedent: 8
+ :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError:
+            raise ValueError("Account URL must be a string.")
+        parsed_url = urlparse(account_url.rstrip('/'))
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(account_url))
+
+        blob_account_url = convert_dfs_url_to_blob_url(account_url)
+        self._blob_account_url = blob_account_url
+        self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs)
+        self._blob_service_client._hosts[LocationMode.SECONDARY] = ""  # pylint: disable=protected-access
+
+        _, sas_token = parse_query(parsed_url.query)
+        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+        super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs',
+                                                    credential=self._raw_credential, **kwargs)
+        # ADLS doesn't support secondary endpoint, make sure it's empty
+        self._hosts[LocationMode.SECONDARY] = ""
+
+    def __exit__(self, *args):
+        self._blob_service_client.close()
+        super(DataLakeServiceClient, self).__exit__(*args)
+
+    def close(self):
+        # type: () -> None
+        """This method closes the sockets opened by the client.
+        It need not be used when the client is used within a context manager.
+        """
+        self._blob_service_client.close()
+        self.__exit__()
+
+    def _format_url(self, hostname):
+        """Format the endpoint URL according to the hostname.
+        """
+        formatted_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str)
+        return formatted_url
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> DataLakeServiceClient
+        """
+        Create DataLakeServiceClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :return: A DataLakeServiceClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_data_lake_service_client_from_conn_str]
+                :end-before: [END create_data_lake_service_client_from_conn_str]
+                :language: python
+                :dedent: 8
+                :caption: Creating the DataLakeServiceClient from a connection string.
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(account_url, credential=credential, **kwargs)
+
+    def get_user_delegation_key(self, key_start_time,  # type: datetime
+                                key_expiry_time,  # type: datetime
+                                **kwargs  # type: Any
+                                ):
+        # type: (...) -> UserDelegationKey
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.filedatalake.UserDelegationKey
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START get_user_delegation_key]
+                :end-before: [END get_user_delegation_key]
+                :language: python
+                :dedent: 8
+                :caption: Get user delegation key from datalake service client.
+        """
+        delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time,
+                                                                           key_expiry_time=key_expiry_time,
+                                                                           **kwargs)  # pylint: disable=protected-access
+        return UserDelegationKey._from_generated(delegation_key)  # pylint: disable=protected-access
+
+    def list_file_systems(self, name_starts_with=None,  # type: Optional[str]
+                          include_metadata=None,  # type: Optional[bool]
+                          **kwargs):
+        # type: (...) -> ItemPaged[FileSystemProperties]
+        """Returns a generator to list the file systems under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all file systems have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only file systems whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that file system metadata be returned in the response.
+            The default value is `False`.
+        :keyword int results_per_page:
+            The maximum number of file system names to retrieve per API
+            call. If the request does not specify a value, the server will return up to 5,000 items per page.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) of FileSystemProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START list_file_systems]
+                :end-before: [END list_file_systems]
+                :language: python
+                :dedent: 8
+                :caption: Listing the file systems in the datalake service.
+        """
+        item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with,
+                                                               include_metadata=include_metadata,
+                                                               **kwargs)  # pylint: disable=protected-access
+        item_paged._page_iterator_class = FileSystemPropertiesPaged  # pylint: disable=protected-access
+        return item_paged
+
+    def create_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                           metadata=None,  # type: Optional[Dict[str, str]]
+                           public_access=None,  # type: Optional[PublicAccess]
+                           **kwargs):
+        # type: (...) -> FileSystemClient
+        """Creates a new file system under the specified account.
+
+        If the file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created file system.
+
+        :param str file_system:
+            The name of the file system to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+            Possible values include: 'file system', 'file'.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START create_file_system_from_service_client]
+                :end-before: [END create_file_system_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Creating a file system in the datalake service.
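+
+        A minimal usage sketch (the account URL, credential, and file system
+        name below are placeholders, not values taken from the samples):
+
+        .. code-block:: python
+
+            service_client = DataLakeServiceClient(
+                "https://myaccount.dfs.core.windows.net", credential=credential)
+            file_system_client = service_client.create_file_system("my-file-system")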
+ """ + file_system_client = self.get_file_system_client(file_system) + file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) + return file_system_client + + def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] + **kwargs): + # type: (...) -> FileSystemClient + """Marks the specified file system for deletion. + + The file system and any files contained within it are later deleted during garbage collection. + If the file system is not found, a ResourceNotFoundError will be raised. + + :param file_system: + The file system to delete. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :keyword lease: + If specified, delete_file_system only succeeds if the + file system's lease is active and matches this ID. + Required if the file system has an active lease. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START delete_file_system_from_service_client] + :end-before: [END delete_file_system_from_service_client] + :language: python + :dedent: 8 + :caption: Deleting a file system in the datalake service. + """ + file_system_client = self.get_file_system_client(file_system) + file_system_client.delete_file_system(**kwargs) + return file_system_client + + def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] + ): + # type: (...) -> FileSystemClient + """Get a client to interact with the specified file system. + + The file system need not already exist. + + :param file_system: + The file system. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :returns: A FileSystemClient. + :rtype: ~azure.storage.filedatalake.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Getting the file system client to interact with a specific file system. 
+ """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + + return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, + _configuration=self._config, + _pipeline=self._pipeline, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + + def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] + directory # type: Union[DirectoryProperties, str] + ): + # type: (...) -> DataLakeDirectoryClient + """Get a client to interact with the specified directory. + + The directory need not already exist. + + :param file_system: + The file system that the directory is in. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :param directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :returns: A DataLakeDirectoryClient. + :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START get_directory_client_from_service_client] + :end-before: [END get_directory_client_from_service_client] + :language: python + :dedent: 8 + :caption: Getting the directory client to interact with a specific directory. + """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + try: + directory_name = directory.name + except AttributeError: + directory_name = directory + return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, + credential=self._raw_credential, + _configuration=self._config, _pipeline=self._pipeline, + _hosts=self._hosts, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function + ) + + def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] + file_path # type: Union[FileProperties, str] + ): + # type: (...) -> DataLakeFileClient + """Get a client to interact with the specified file. + + The file need not already exist. + + :param file_system: + The file system that the file is in. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :param file_path: + The file with which to interact. This can either be the full path of the file(from the root directory), + or an instance of FileProperties. eg. directory/subdirectory/file + :type file_path: str or ~azure.storage.filedatalake.FileProperties + :returns: A DataLakeFileClient. + :rtype: ~azure.storage.filedatalake..DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START get_file_client_from_service_client] + :end-before: [END get_file_client_from_service_client] + :language: python + :dedent: 8 + :caption: Getting the file client to interact with a specific file. 
+ """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + try: + file_path = file_path.name + except AttributeError: + pass + + return DataLakeFileClient( + self.url, file_system_name, file_path=file_path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_deserialize.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_deserialize.py new file mode 100644 index 0000000..f54a82b --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_deserialize.py @@ -0,0 +1,146 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +from typing import ( # pylint: disable=unused-import + TYPE_CHECKING +) + +from azure.core.pipeline.policies import ContentDecodePolicy +from azure.core.exceptions import HttpResponseError, DecodeError, ResourceModifiedError, ClientAuthenticationError, \ + ResourceNotFoundError, ResourceExistsError +from ._models import FileProperties, DirectoryProperties, LeaseProperties +from ._shared.models import StorageErrorCode + +if TYPE_CHECKING: + pass + +_LOGGER = logging.getLogger(__name__) + + +def deserialize_dir_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + dir_properties = DirectoryProperties( + metadata=metadata, + **headers + ) + return dir_properties + + +def deserialize_file_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + file_properties = FileProperties( + metadata=metadata, + **headers + ) + if 'Content-Range' in headers: + if 'x-ms-blob-content-md5' in headers: + file_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] + else: + file_properties.content_settings.content_md5 = None + return file_properties + + +def from_blob_properties(blob_properties): + file_props = FileProperties() + file_props.name = blob_properties.name + file_props.etag = blob_properties.etag + file_props.deleted = blob_properties.deleted + file_props.metadata = blob_properties.metadata + file_props.lease = blob_properties.lease + file_props.lease.__class__ = LeaseProperties + file_props.last_modified = blob_properties.last_modified + file_props.creation_time = blob_properties.creation_time + file_props.size = blob_properties.size + file_props.deleted_time = blob_properties.deleted_time + file_props.remaining_retention_days = blob_properties.remaining_retention_days + file_props.content_settings = blob_properties.content_settings + return file_props + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = value + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_headers_and_deserialized_path_list(response, deserialized, response_headers): 
+    return deserialized.paths if deserialized.paths else {}, normalize_headers(response_headers)
+
+
+def process_storage_error(storage_error):
+    raise_error = HttpResponseError
+    error_code = storage_error.response.headers.get('x-ms-error-code')
+    error_message = storage_error.message
+    additional_data = {}
+    try:
+        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+        if error_body:
+            for info in error_body:
+                if info == 'code':
+                    error_code = error_body[info]
+                elif info == 'message':
+                    error_message = error_body[info]
+                else:
+                    additional_data[info] = error_body[info]
+    except DecodeError:
+        pass
+
+    try:
+        if error_code:
+            error_code = StorageErrorCode(error_code)
+            if error_code in [StorageErrorCode.condition_not_met]:
+                raise_error = ResourceModifiedError
+            if error_code in [StorageErrorCode.invalid_authentication_info,
+                              StorageErrorCode.authentication_failed]:
+                raise_error = ClientAuthenticationError
+            if error_code in [StorageErrorCode.resource_not_found,
+                              StorageErrorCode.invalid_property_name,
+                              StorageErrorCode.invalid_source_uri,
+                              StorageErrorCode.source_path_not_found,
+                              StorageErrorCode.lease_name_mismatch,
+                              StorageErrorCode.file_system_not_found,
+                              StorageErrorCode.path_not_found,
+                              StorageErrorCode.parent_not_found,
+                              StorageErrorCode.invalid_destination_path,
+                              StorageErrorCode.invalid_rename_source_path,
+                              StorageErrorCode.lease_is_already_broken,
+                              StorageErrorCode.invalid_source_or_destination_resource_type,
+                              StorageErrorCode.rename_destination_parent_path_not_found]:
+                raise_error = ResourceNotFoundError
+            if error_code in [StorageErrorCode.account_already_exists,
+                              StorageErrorCode.account_being_created,
+                              StorageErrorCode.resource_already_exists,
+                              StorageErrorCode.resource_type_mismatch,
+                              StorageErrorCode.source_path_is_being_deleted,
+                              StorageErrorCode.path_already_exists,
+                              StorageErrorCode.destination_path_is_being_deleted,
+                              StorageErrorCode.file_system_already_exists,
+                              StorageErrorCode.file_system_being_deleted,
+                              StorageErrorCode.path_conflict]:
+                raise_error = ResourceExistsError
+    except ValueError:
+        # Got an unknown error code
+        pass
+
+    try:
+        error_message += "\nErrorCode:{}".format(error_code.value)
+    except AttributeError:
+        error_message += "\nErrorCode:{}".format(error_code)
+    for name, info in additional_data.items():
+        error_message += "\n{}:{}".format(name, info)
+
+    error = raise_error(message=error_message, response=storage_error.response)
+    error.error_code = error_code
+    error.additional_info = additional_data
+    raise error
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_download.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_download.py
new file mode 100644
index 0000000..e4efd8c
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_download.py
@@ -0,0 +1,52 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from ._deserialize import from_blob_properties
+
+
+class StorageStreamDownloader(object):
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the file being downloaded.
+    :ivar ~azure.storage.filedatalake.FileProperties properties:
+        The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file.
+    """
+
+    def __init__(self, downloader):
+        self._downloader = downloader
+        self.name = self._downloader.name
+        self.properties = from_blob_properties(self._downloader.properties)  # pylint: disable=protected-access
+        self.size = self._downloader.size
+
+    def __len__(self):
+        return self.size
+
+    def chunks(self):
+        return self._downloader.chunks()
+
+    def readall(self):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :rtype: bytes or str
+        """
+        return self._downloader.readall()
+
+    def readinto(self, stream):
+        """Download the contents of this file to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        return self._downloader.readinto(stream)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_file_system_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_file_system_client.py
new file mode 100644
index 0000000..ce6167f
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_file_system_client.py
@@ -0,0 +1,783 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import functools
+
+try:
+    from urllib.parse import urlparse, quote
+except ImportError:
+    from urlparse import urlparse  # type: ignore
+    from urllib2 import quote  # type: ignore
+
+import six
+from azure.core.paging import ItemPaged
+from azure.multiapi.storagev2.blob.v2019_07_07 import ContainerClient
+from ._shared.base_client import StorageAccountHostsMixin, parse_query, parse_connection_str
+from ._serialize import convert_dfs_url_to_blob_url
+from ._models import LocationMode, FileSystemProperties, PublicAccess
+from ._list_paths_helper import PathPropertiesPaged
+from ._data_lake_file_client import DataLakeFileClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._generated import DataLakeStorageClient
+
+
+class FileSystemClient(StorageAccountHostsMixin):
+    """A client to interact with a specific file system, even if that file system
+    may not yet exist.
+
+    For operations relating to a specific directory or file within this file system, a directory client or file client
+    can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_file_system.py
+            :start-after: [START create_file_system_client_from_service]
+            :end-before: [END create_file_system_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a FileSystemClient from an existing DataLakeServiceClient.
+    """
+    def __init__(
+            self, account_url,  # type: str
+            file_system_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError:
+            raise ValueError("Account URL must be a string.")
+        parsed_url = urlparse(account_url.rstrip('/'))
+        if not file_system_name:
+            raise ValueError("Please specify a file system name.")
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(account_url))
+
+        blob_account_url = convert_dfs_url_to_blob_url(account_url)
+        # TODO: add self.account_url to base_client and remove _blob_account_url
+        self._blob_account_url = blob_account_url
+
+        datalake_hosts = kwargs.pop('_hosts', None)
+        blob_hosts = None
+        if datalake_hosts:
+            blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
+            blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""}
+        self._container_client = ContainerClient(blob_account_url, file_system_name,
+                                                 credential=credential, _hosts=blob_hosts, **kwargs)
+
+        _, sas_token = parse_query(parsed_url.query)
+        self.file_system_name = file_system_name
+        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+        super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
+                                               _hosts=datalake_hosts, **kwargs)
+        # ADLS doesn't support secondary endpoint, make sure it's empty
+        self._hosts[LocationMode.SECONDARY] = ""
+        self._client = DataLakeStorageClient(self.url, file_system_name, None, pipeline=self._pipeline)
+
+    def _format_url(self, hostname):
+        file_system_name = self.file_system_name
+        if isinstance(file_system_name, six.text_type):
+            file_system_name = file_system_name.encode('UTF-8')
+        return "{}://{}/{}{}".format(
+            self.scheme,
+            hostname,
+            quote(file_system_name),
+            self._query_str)
+
+    def __exit__(self, *args):
+        self._container_client.close()
+        super(FileSystemClient, self).__exit__(*args)
+
+    def close(self):
+        # type: () -> None
+        """This method closes the sockets opened by the client.
+        It need not be used when the client is used within a context manager.
+        """
+        self._container_client.close()
+        self.__exit__()
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            file_system_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> FileSystemClient
+        """
+        Create FileSystemClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name: The name of file system to interact with.
+        :type file_system_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :return: A FileSystemClient.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_system_client_from_connection_string]
+                :end-before: [END create_file_system_client_from_connection_string]
+                :language: python
+                :dedent: 8
+                :caption: Create FileSystemClient from connection string
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, credential=credential, **kwargs)
+
+    def acquire_lease(
+            self, lease_duration=-1,  # type: int
+            lease_id=None,  # type: Optional[str]
+            **kwargs
+        ):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file system does not have an active lease,
+        the DataLake service creates a lease on the file system and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A DataLakeLeaseClient object, that can be run in a context manager.
+        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START acquire_lease_on_file_system]
+                :end-before: [END acquire_lease_on_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Acquiring a lease on the file system.
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)
+        lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
+
+    def create_file_system(self, metadata=None,  # type: Optional[Dict[str, str]]
+                           public_access=None,  # type: Optional[PublicAccess]
+                           **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Creates a new file system under the specified account.
+
+        If the file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a dictionary of response headers for the newly
+        created file system.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A file system-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_system]
+                :end-before: [END create_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Creating a file system in the datalake service.
+        """
+        return self._container_client.create_container(metadata=metadata,
+                                                       public_access=public_access,
+                                                       **kwargs)
+
+    def delete_file_system(self, **kwargs):
+        # type: (Any) -> None
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+            If specified, delete_file_system only succeeds if the
+            file system's lease is active and matches this ID.
+            Required if the file system has an active lease.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_file_system]
+                :end-before: [END delete_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Deleting a file system in the datalake service.
+        """
+        self._container_client.delete_container(**kwargs)
+
+    def get_file_system_properties(self, **kwargs):
+        # type: (Any) -> FileSystemProperties
+        """Returns all user-defined metadata and system properties for the specified
+        file system. The data returned does not include the file system's list of paths.
+
+        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+            If specified, get_file_system_properties only succeeds if the
+            file system's lease is active and matches this ID.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: Properties for the specified file system within a file system object.
+        :rtype: ~azure.storage.filedatalake.FileSystemProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_file_system_properties]
+                :end-before: [END get_file_system_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting properties on the file system.
+        """
+        container_properties = self._container_client.get_container_properties(**kwargs)
+        return FileSystemProperties._convert_from_container_props(container_properties)  # pylint: disable=protected-access
+
+    def set_file_system_metadata(  # type: ignore
+            self, metadata,  # type: Dict[str, str]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file system. Each call to this operation replaces all existing metadata
+        attached to the file system. To remove all metadata from the file system,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file system as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+            If specified, set_file_system_metadata only succeeds if the
+            file system's lease is active and matches this ID.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A file system-updated property dict (Etag and last modified).
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START set_file_system_metadata]
+                :end-before: [END set_file_system_metadata]
+                :language: python
+                :dedent: 12
+                :caption: Setting metadata on the file system.
+        """
+        return self._container_client.set_container_metadata(metadata=metadata, **kwargs)
+
+    def set_file_system_access_policy(
+            self, signed_identifiers,  # type: Dict[str, AccessPolicy]
+            public_access=None,  # type: Optional[Union[str, PublicAccess]]
+            **kwargs
+        ):  # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets the permissions for the specified file system or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a file system may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the file system. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy]
+        :param ~azure.storage.filedatalake.PublicAccess public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :keyword lease:
+            Required if the file system has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A file system-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+        """
+        return self._container_client.set_container_access_policy(signed_identifiers,
+                                                                  public_access=public_access, **kwargs)
+
+    def get_file_system_access_policy(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the permissions for the specified file system.
+        The permissions indicate whether file system data may be accessed publicly.
+
+        :keyword lease:
+            If specified, the operation only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
+        """
+        access_policy = self._container_client.get_container_access_policy(**kwargs)
+        return {
+            'public_access': PublicAccess._from_generated(access_policy['public_access']),  # pylint: disable=protected-access
+            'signed_identifiers': access_policy['signed_identifiers']
+        }
+
+    def get_paths(self, path=None,  # type: Optional[str]
+                  recursive=True,  # type: Optional[bool]
+                  max_results=None,  # type: Optional[int]
+                  **kwargs):
+        # type: (...) -> ItemPaged[PathProperties]
+        """Returns a generator to list the paths (files or directories) under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str path:
+            Filters the results to return only paths under the specified path.
+        :param bool recursive: Optional. Set True for recursive, False for iterative.
+        :param int max_results: An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword upn:
+            Optional. Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names. If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_paths_in_file_system]
+                :end-before: [END get_paths_in_file_system]
+                :language: python
+                :dedent: 8
+                :caption: List the paths in the file system.
+        """
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.file_system.list_paths,
+            path=path,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, recursive, path=path, max_results=max_results,
+            page_iterator_class=PathPropertiesPaged, **kwargs)
+
+    def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                         metadata=None,  # type: Optional[Dict[str, str]]
+                         **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a directory.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_directory_from_file_system]
+                :end-before: [END create_directory_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Create directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        directory_client.create_directory(metadata=metadata, **kwargs)
+        return directory_client
+
+    def delete_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                         **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified path for deletion.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_directory_from_file_system]
+                :end-before: [END delete_directory_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Delete directory in the file system.
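+
+        A minimal sketch (the directory name is a placeholder):
+
+        .. code-block:: python
+
+            directory_client = file_system_client.delete_directory("my-directory")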
+ """ + directory_client = self.get_directory_client(directory) + directory_client.delete_directory(**kwargs) + return directory_client + + def create_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) -> DataLakeFileClient + """ + Create file + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. + :type file: str or ~azure.storage.filedatalake.FileProperties + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_file_from_file_system] + :end-before: [END create_file_from_file_system] + :language: python + :dedent: 8 + :caption: Create file in the file system. + """ + file_client = self.get_file_client(file) + file_client.create_file(**kwargs) + return file_client + + def delete_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) 
+        """
+        Marks the specified file for deletion.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_file_from_file_system]
+                :end-before: [END delete_file_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Delete file in the file system.
+        """
+        file_client = self.get_file_client(file)
+        file_client.delete_file(**kwargs)
+        return file_client
+
+    def _get_root_directory_client(self):
+        # type: () -> DataLakeDirectoryClient
+        """Get a client to interact with the root directory.
+
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        return self.get_directory_client('/')
+
+    def get_directory_client(self, directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_directory_client_from_file_system]
+                :end-before: [END get_directory_client_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Getting the directory client to interact with a specific directory.
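+
+        A minimal sketch (the directory name is a placeholder):
+
+        .. code-block:: python
+
+            directory_client = file_system_client.get_directory_client("my-directory")
+            directory_client.create_directory()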
+ """ + try: + directory_name = directory.name + except AttributeError: + directory_name = directory + + return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, + credential=self._raw_credential, + _configuration=self._config, _pipeline=self._pipeline, + _hosts=self._hosts, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function + ) + + def get_file_client(self, file_path # type: Union[FileProperties, str] + ): + # type: (...) -> DataLakeFileClient + """Get a client to interact with the specified file. + + The file need not already exist. + + :param file_path: + The file with which to interact. This can either be the path of the file(from root directory), + or an instance of FileProperties. eg. directory/subdirectory/file + :type file_path: str or ~azure.storage.filedatalake.FileProperties + :returns: A DataLakeFileClient. + :rtype: ~azure.storage.filedatalake..DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START get_file_client_from_file_system] + :end-before: [END get_file_client_from_file_system] + :language: python + :dedent: 8 + :caption: Getting the file client to interact with a specific file. + """ + try: + file_path = file_path.name + except AttributeError: + pass + + return DataLakeFileClient( + self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/__init__.py new file mode 100644 index 0000000..2c90133 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/__init__.py @@ -0,0 +1,18 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from ._data_lake_storage_client import DataLakeStorageClient +__all__ = ['DataLakeStorageClient'] + +from .version import VERSION + +__version__ = VERSION + diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_configuration.py new file mode 100644 index 0000000..ab73595 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_configuration.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# --------------------------------------------------------------------------
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+from .version import VERSION
+
+
+class DataLakeStorageClientConfiguration(Configuration):
+    """Configuration for DataLakeStorageClient
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    :param file_system: The filesystem identifier.
+    :type file_system: str
+    :param path1: The file or directory path.
+    :type path1: str
+    :ivar resource: The value must be "filesystem" for all filesystem
+     operations.
+    :vartype resource: str
+    :ivar version: Specifies the version of the operation to use for this
+     request.
+    :vartype version: str
+    """
+
+    def __init__(self, url, file_system, path1, **kwargs):
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        super(DataLakeStorageClientConfiguration, self).__init__(**kwargs)
+        self._configure(**kwargs)
+
+        self.user_agent_policy.add_user_agent('azsdk-python-datalakestorageclient/{}'.format(VERSION))
+        self.generate_client_request_id = True
+
+        self.url = url
+        self.file_system = file_system
+        self.path1 = path1
+        self.resource = "filesystem"
+        self.version = "2020-02-10"
+
+    def _configure(self, **kwargs):
+        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_data_lake_storage_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_data_lake_storage_client.py
new file mode 100644
index 0000000..ae9969b
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_data_lake_storage_client.py
@@ -0,0 +1,67 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core import PipelineClient
+from msrest import Serializer, Deserializer
+
+from ._configuration import DataLakeStorageClientConfiguration
+from azure.core.exceptions import map_error
+from .operations import ServiceOperations
+from .operations import FileSystemOperations
+from .operations import PathOperations
+from . import models
+
+
+class DataLakeStorageClient(object):
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
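+
+    A minimal construction sketch; the account URL and path below are
+    hypothetical placeholders::
+
+        with DataLakeStorageClient("https://myaccount.dfs.core.windows.net",
+                                   "myfilesystem", "dir/file.txt") as client:
+            client.file_system.get_properties()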
+
+
+    :ivar service: Service operations
+    :vartype service: azure.storage.filedatalake.operations.ServiceOperations
+    :ivar file_system: FileSystem operations
+    :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations
+    :ivar path: Path operations
+    :vartype path: azure.storage.filedatalake.operations.PathOperations
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    :param file_system: The filesystem identifier.
+    :type file_system: str
+    :param path1: The file or directory path.
+    :type path1: str
+    """
+
+    def __init__(self, url, file_system, path1, **kwargs):
+
+        base_url = '{url}'
+        self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs)
+        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self.api_version = '2020-02-10'
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    def close(self):
+        self._client.close()
+
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *exc_details):
+        self._client.__exit__(*exc_details)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/__init__.py
new file mode 100644
index 0000000..5f09159
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/__init__.py
@@ -0,0 +1,13 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._data_lake_storage_client_async import DataLakeStorageClient
+__all__ = ['DataLakeStorageClient']
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_configuration_async.py
new file mode 100644
index 0000000..3fcd104
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_configuration_async.py
@@ -0,0 +1,63 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
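+#
+# A hypothetical usage sketch for the configuration class below: any pipeline
+# policy passed as a keyword argument replaces the default built by
+# _configure, for example:
+#
+#     from azure.core.pipeline import policies
+#     config = DataLakeStorageClientConfiguration(
+#         "https://myaccount.dfs.core.windows.net", "myfilesystem", "dir/file",
+#         retry_policy=policies.AsyncRetryPolicy(retry_total=3))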
+# --------------------------------------------------------------------------
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+from ..version import VERSION
+
+
+class DataLakeStorageClientConfiguration(Configuration):
+    """Configuration for DataLakeStorageClient
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    :param file_system: The filesystem identifier.
+    :type file_system: str
+    :param path1: The file or directory path.
+    :type path1: str
+    :ivar resource: The value must be "filesystem" for all filesystem
+     operations.
+    :vartype resource: str
+    :ivar version: Specifies the version of the operation to use for this
+     request.
+    :vartype version: str
+    """
+
+    def __init__(self, url, file_system, path1, **kwargs):
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        super(DataLakeStorageClientConfiguration, self).__init__(**kwargs)
+        self._configure(**kwargs)
+
+        self.user_agent_policy.add_user_agent('azsdk-python-datalakestorageclient/{}'.format(VERSION))
+        self.generate_client_request_id = True
+        self.accept_language = None
+
+        self.url = url
+        self.file_system = file_system
+        self.path1 = path1
+        self.resource = "filesystem"
+        self.version = "2020-02-10"
+
+    def _configure(self, **kwargs):
+        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_data_lake_storage_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_data_lake_storage_client_async.py
new file mode 100644
index 0000000..3486d5c
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_data_lake_storage_client_async.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core import AsyncPipelineClient
+from msrest import Serializer, Deserializer
+
+from ._configuration_async import DataLakeStorageClientConfiguration
+from azure.core.exceptions import map_error
+from .operations_async import ServiceOperations
+from .operations_async import FileSystemOperations
+from .operations_async import PathOperations
+from .. import models
+
+
+class DataLakeStorageClient(object):
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
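+
+    A minimal async usage sketch; the account URL and path below are
+    hypothetical placeholders::
+
+        async with DataLakeStorageClient("https://myaccount.dfs.core.windows.net",
+                                         "myfilesystem", "dir/file.txt") as client:
+            await client.file_system.get_properties()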
+
+
+    :ivar service: Service operations
+    :vartype service: azure.storage.filedatalake.aio.operations_async.ServiceOperations
+    :ivar file_system: FileSystem operations
+    :vartype file_system: azure.storage.filedatalake.aio.operations_async.FileSystemOperations
+    :ivar path: Path operations
+    :vartype path: azure.storage.filedatalake.aio.operations_async.PathOperations
+
+    :param url: The URL of the service account, container, or blob that is the
+     target of the desired operation.
+    :type url: str
+    :param file_system: The filesystem identifier.
+    :type file_system: str
+    :param path1: The file or directory path.
+    :type path1: str
+    """
+
+    def __init__(
+            self, url, file_system, path1, **kwargs):
+
+        base_url = '{url}'
+        self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs)
+        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self.api_version = '2020-02-10'
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    async def close(self):
+        await self._client.close()
+
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details):
+        await self._client.__aexit__(*exc_details)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/__init__.py
new file mode 100644
index 0000000..1190e52
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/__init__.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations_async import ServiceOperations
+from ._file_system_operations_async import FileSystemOperations
+from ._path_operations_async import PathOperations
+
+__all__ = [
+    'ServiceOperations',
+    'FileSystemOperations',
+    'PathOperations',
+]
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_file_system_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_file_system_operations_async.py
new file mode 100644
index 0000000..f1af068
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_file_system_operations_async.py
@@ -0,0 +1,462 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
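+#
+# A hypothetical sketch of building the "n1=v1, n2=v2, ..." value expected by
+# the x-ms-properties header of the create/set_properties operations defined
+# below (each value must be base64 encoded):
+#
+#     import base64
+#     props = {'project': 'demo', 'team': 'storage'}
+#     properties = ', '.join(
+#         '{}={}'.format(k, base64.b64encode(v.encode('latin-1')).decode('ascii'))
+#         for k, v in props.items())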
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class FileSystemOperations:
+    """FileSystemOperations async operations.
+
+    You should not instantiate this class directly; instead, create a Client
+    instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+
+    async def create(self, properties=None, request_id=None, timeout=None, *, cls=None, **kwargs):
+        """Create FileSystem.
+
+        Create a FileSystem rooted at the specified location. If the FileSystem
+        already exists, the operation fails. This operation does not support
+        conditional HTTP requests.
+
+        :param properties: Optional. User-defined properties to be stored with
+         the filesystem, in the format of a comma-separated list of name and
+         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
+         string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set. If the filesystem exists, any properties
+         not included in the list will be removed. All properties are removed
+         if the header is omitted. To merge new and existing properties, first
+         get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all
+         properties.
+        :type properties: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{filesystem}'} + + async def set_properties(self, properties=None, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Set FileSystem Properties. + + Set properties for the FileSystem. This operation supports conditional + HTTP requests. For more information, see [Specifying Conditional + Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed. All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. 
+ :type properties: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.set_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_properties.metadata = {'url': '/{filesystem}'} + + async def get_properties(self, request_id=None, timeout=None, *, cls=None, **kwargs): + """Get FileSystem Properties. 
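+
+        A minimal sketch of the ``cls`` response hook with hypothetical names
+        (``fs_ops`` is an instance of this class)::
+
+            def keep_headers(response, deserialized, headers):
+                return headers
+
+            headers = await fs_ops.get_properties(cls=keep_headers)
+            namespace_enabled = headers.get('x-ms-namespace-enabled')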
+ + All system and user-defined filesystem properties are specified in the + response headers. + + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{filesystem}'} + + async def delete(self, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Delete FileSystem. + + Marks the FileSystem for deletion. When a FileSystem is deleted, a + FileSystem with the same identifier cannot be created for at least 30 + seconds. While the filesystem is being deleted, attempts to create a + filesystem with the same identifier will fail with status code 409 + (Conflict), with the service returning additional error information + indicating that the filesystem is being deleted. All other operations, + including operations on any files or directories within the filesystem, + will fail with status code 404 (Not Found) while the filesystem is + being deleted. This operation supports conditional HTTP requests. 
For + more information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{filesystem}'} + + async def list_paths(self, recursive, continuation=None, path=None, max_results=None, upn=None, request_id=None, timeout=None, *, cls=None, **kwargs): + """List Paths. + + List FileSystem paths and their properties. 
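+
+        A minimal paging sketch using the x-ms-continuation token, with
+        hypothetical names (``fs_ops`` is an instance of this class)::
+
+            def page(response, deserialized, headers):
+                return deserialized, headers
+
+            token = None
+            while True:
+                paths, headers = await fs_ops.list_paths(
+                    recursive=True, continuation=token, cls=page)
+                token = headers.get('x-ms-continuation')
+                if not token:
+                    break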
+
+        :param recursive: Required. If "true", all paths are listed;
+         otherwise, the paths are listed only at the root of the filesystem.
+        :type recursive: bool
+        :param continuation: Optional. When listing paths, the number of paths
+         returned with each invocation is limited. If the number of paths to
+         be returned exceeds this limit, a continuation token is returned in
+         the response header x-ms-continuation. When a continuation token is
+         returned in the response, it must be specified in a subsequent
+         invocation of the list operation to continue listing the paths.
+        :type continuation: str
+        :param path: Optional. Filters results to paths within the specified
+         directory. An error occurs if the directory does not exist.
+        :type path: str
+        :param max_results: An optional value that specifies the maximum
+         number of items to return. If omitted or greater than 5,000, the
+         response will include up to 5,000 items.
+        :type max_results: int
+        :param upn: Optional. Valid only when Hierarchical Namespace is
+         enabled for the account. If "true", the user identity values returned
+         in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+         transformed from Azure Active Directory Object IDs to User Principal
+         Names. If "false", the values will be returned as Azure Active
+         Directory Object IDs. The default value is false. Note that group and
+         application Object IDs are not translated because they do not have
+         unique friendly names.
+        :type upn: bool
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: PathList or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.PathList
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        # Construct URL
+        url = self.list_paths.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if continuation is not None:
+            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+        if path is not None:
+            query_parameters['directory'] = self._serialize.query("path", path, 'str')
+        query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
+        if max_results is not None:
+            query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+        if upn is not None:
+            query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+        query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('PathList', response)
+            header_dict = {
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    list_paths.metadata = {'url': '/{filesystem}'}
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_path_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_path_operations_async.py
new file mode 100644
index 0000000..28f0999
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_path_operations_async.py
@@ -0,0 +1,1698 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class PathOperations:
+    """PathOperations async operations.
+
+    You should not instantiate this class directly; instead, create a Client
+    instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar comp: Constant value: "expiry".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.comp = "expiry"
+
+    async def create(self, resource=None, continuation=None, mode=None, rename_source=None, source_lease_id=None, properties=None, permissions=None, umask=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+        """Create File | Create Directory | Rename File | Rename Directory.
+
+        Create or rename a file or directory. By default, the destination is
+        overwritten and if the destination already exists and has a lease the
+        lease is broken. This operation supports conditional HTTP requests.
+        For more information, see [Specifying Conditional Headers for Blob
+        Service
+        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+        To fail if the destination already exists, use a conditional request
+        with If-None-Match: "*".
+
+        :param resource: Required only for Create File and Create Directory.
+         The value must be "file" or "directory". Possible values include:
+         'directory', 'file'
+        :type resource: str or
+         ~azure.storage.filedatalake.models.PathResourceType
+        :param continuation: Optional. When renaming a directory, the number
+         of paths that are renamed with each invocation is limited. If the
+         number of paths to be renamed exceeds this limit, a continuation
+         token is returned in this response header. When a continuation token
+         is returned in the response, it must be specified in a subsequent
+         invocation of the rename operation to continue renaming the
+         directory.
+        :type continuation: str
+        :param mode: Optional. Valid only when namespace is enabled. This
+         parameter determines the behavior of the rename operation. The value
+         must be "legacy" or "posix", and the default value will be "posix".
+         Possible values include: 'legacy', 'posix'
+        :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+        :param rename_source: An optional file or directory to be renamed.
+         The value must have the following format: "/{filesystem}/{path}". If
+         "x-ms-properties" is specified, the properties will overwrite the
+         existing properties; otherwise, the existing properties will be
+         preserved. This value must be a URL percent-encoded string. Note that
+         the string may only contain ASCII characters in the ISO-8859-1
+         character set.
+        :type rename_source: str
+        :param source_lease_id: A lease ID for the source path. If specified,
+         the source path must have an active lease and the lease ID must match.
+        :type source_lease_id: str
+        :param properties: Optional. User-defined properties to be stored with
+         the filesystem, in the format of a comma-separated list of name and
+         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
+         string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set. If the filesystem exists, any properties
+         not included in the list will be removed. All properties are removed
+         if the header is omitted. To merge new and existing properties, first
+         get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all
+         properties.
+        :type properties: str
+        :param permissions: Optional and only valid if Hierarchical Namespace
+         is enabled for the account. Sets POSIX access permissions for the file
+         owner, the file owning group, and others. Each class may be granted
+         read, write, or execute permission. The sticky bit is also supported.
+         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+         supported.
+        :type permissions: str
+        :param umask: Optional and only valid if Hierarchical Namespace is
+         enabled for the account. When creating a file or directory and the
+         parent folder does not have a default ACL, the umask restricts the
+         permissions of the file or directory to be created. The resulting
+         permission is given by p bitwise and not u, where p is the permission
+         and u is the umask. For example, if p is 0777 and u is 0057, then the
+         resulting permission is 0720. The default permission is 0777 for a
+         directory and 0666 for a file.
The default umask is 0027. The umask + must be specified in 4-digit octal notation (e.g. 0766). + :type umask: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.filedatalake.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + 
query_parameters = {} + if resource is not None: + query_parameters['resource'] = self._serialize.query("resource", resource, 'PathResourceType') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if mode is not None: + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathRenameMode') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if rename_source is not None: + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + + # 
Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{filesystem}/{path}'} + + async def update(self, action, mode, body, max_records=None, continuation=None, force_flag=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Append Data | Flush Data | Set Properties | Set Access Control. + + Uploads data to be appended to a file, flushes (writes) previously + uploaded data to a file, sets properties for a file or directory, or + sets access control for a file or directory. Data can only be appended + to a file. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param action: The action must be "append" to upload data to be + appended to a file, "flush" to flush previously uploaded data to a + file, "setProperties" to set the properties of a file or directory, + "setAccessControl" to set the owner, group, permissions, or access + control list for a file or directory, or "setAccessControlRecursive" + to set the access control list for a directory recursively. Note that + Hierarchical Namespace must be enabled for the account in order to use + access control. Also note that the Access Control List (ACL) includes + permissions for the owner, owning group, and others, so the + x-ms-permissions and x-ms-acl request headers are mutually exclusive. + Possible values include: 'append', 'flush', 'setProperties', + 'setAccessControl', 'setAccessControlRecursive' + :type action: str or + ~azure.storage.filedatalake.models.PathUpdateAction + :param mode: Mode "set" sets POSIX access control rights on files and + directories, "modify" modifies one or more POSIX access control rights + that pre-exist on files and directories, "remove" removes one or more + POSIX access control rights that were present earlier on files and + directories. 
Possible values include: 'set', 'modify', 'remove'
+        :type mode: str or
+         ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param body: Initial data
+        :type body: Generator
+        :param max_records: Optional. Valid for "SetAccessControlRecursive"
+         operation. It specifies the maximum number of files or directories on
+         which the acl change will be applied. If omitted or greater than
+         2,000, the request will process up to 2,000 items
+        :type max_records: int
+        :param continuation: Optional. The number of paths processed with each
+         invocation is limited. If the number of paths to be processed exceeds
+         this limit, a continuation token is returned in the response header
+         x-ms-continuation. When a continuation token is returned in the
+         response, it must be percent-encoded and specified in a subsequent
+         invocation of the setAccessControlRecursive operation.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive"
+         operation. If set to false, the operation will terminate quickly on
+         encountering user errors (4XX). If true, the operation will ignore
+         user errors and proceed with the operation on other sub-entities of
+         the directory. Continuation token will only be returned when forceFlag
+         is true in case of user errors. If not set the default value is false
+         for this.
+        :type force_flag: bool
+        :param position: This parameter allows the caller to upload data in
+         parallel and control the order in which it is appended to the file.
+         It is required when uploading data to be appended to the file and when
+         flushing previously uploaded data to the file. The value must be the
+         position where the data is to be appended. Uploaded data is not
+         immediately flushed, or written, to the file. To flush, the
+         previously uploaded data must be contiguous, the position parameter
+         must be specified and equal to the length of the file after all data
+         has been written, and there must not be a request entity body included
+         with the request.
+        :type position: long
+        :param retain_uncommitted_data: Valid only for flush operations. If
+         "true", uncommitted data is retained after the flush operation
+         completes; otherwise, the uncommitted data is deleted after the flush
+         operation. The default is false. Data at offsets less than the
+         specified position are written to the file when flush succeeds, but
+         this optional parameter allows data after the flush position to be
+         retained for a future flush operation.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive
+         notifications when files change. When Azure Storage Events are
+         enabled, a file changed event is raised. This event has a property
+         indicating whether this is the final change to distinguish the
+         difference between an intermediate flush to a file stream and the
+         final close of a file stream. The close query parameter is valid only
+         when the action is "flush" and change notifications are enabled. If
+         the value of close is "true" and the flush operation completes
+         successfully, the service raises a file change notification with a
+         property indicating that this is the final update (the file stream has
+         been closed). If "false" a change notification is raised indicating
+         the file has changed. The default is false. This query parameter is
+         set to true by the Hadoop ABFS driver to indicate that the file stream
+         has been closed.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".
+ Must be 0 for "Flush Data". Must be the length of the request content + in bytes for "Append Data". + :type content_length: long + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed. All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. + :type properties: str + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: SetAccessControlRecursiveResponse or the result of + cls(response) + :rtype: + ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + content_md5 = None + if path_http_headers is not None: + content_md5 = path_http_headers.content_md5 + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') + if force_flag is not None: + query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct 
headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/octet-stream' + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 
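# Standard HTTP content and caching headers returned by the service on a 200 response: +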
'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + update.metadata = {'url': '/{filesystem}/{path}'} + + async def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Lease Path. + + Create and manage a lease to restrict write and delete access to the + path. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param x_ms_lease_action: There are five lease actions: "acquire", + "break", "change", "renew", and "release". Use "acquire" and specify + the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a + new lease. Use "break" to break an existing lease. When a lease is + broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the + file. When a lease is successfully broken, the response indicates the + interval in seconds until a new lease can be acquired. Use "change" + and specify the current lease ID in "x-ms-lease-id" and the new lease + ID in "x-ms-proposed-lease-id" to change the lease ID of an active + lease. Use "renew" and specify the "x-ms-lease-id" to renew an + existing lease. Use "release" and specify the "x-ms-lease-id" to + release a lease. Possible values include: 'acquire', 'break', + 'change', 'renew', 'release' + :type x_ms_lease_action: str or + ~azure.storage.filedatalake.models.PathLeaseAction + :param x_ms_lease_duration: The lease duration is required to acquire + a lease, and specifies the duration of the lease in seconds. The + lease duration must be between 15 and 60 seconds or -1 for infinite + lease. + :type x_ms_lease_duration: int + :param x_ms_lease_break_period: The lease break period duration is + optional to break a lease, and specifies the break period of the + lease in seconds. The lease break duration must be between 0 and 60 + seconds. + :type x_ms_lease_break_period: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. 
+ The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'PathLeaseAction') + if x_ms_lease_duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int') + if x_ms_lease_break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-lease-time': self._deserialize('str', response.headers.get('x-ms-lease-time')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + lease.metadata = {'url': '/{filesystem}/{path}'} + + async def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Read File. + + Read the contents of a file. For read operations, range requests are + supported. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param range: The HTTP Range request header specifies one or more byte + ranges of the resource to be retrieved. + :type range: str + :param x_ms_range_get_content_md5: Optional. When this header is set + to "true" and specified together with the Range header, the service + returns the MD5 hash for the range, as long as the range is less than + or equal to 4MB in size. If this header is specified without the Range + header, the service returns status code 400 (Bad Request). If this + header is set to true when the range exceeds 4 MB in size, the service + returns status code 400 (Bad Request). + :type x_ms_range_get_content_md5: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.read.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if range is not None: + header_parameters['Range'] = self._serialize.header("range", range, 'str') + if x_ms_range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + await response.load_body() + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = 
response.stream_download(self._client._pipeline) + header_dict = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), + 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), + 
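# Remaining lease-status, content-MD5, and error-code headers for the 206 (partial content) branch: +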
'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), + 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + read.metadata = {'url': '/{filesystem}/{path}'} + + async def get_properties(self, action=None, upn=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Get Properties | Get Status | Get Access Control List. + + Get Properties returns all system and user defined properties for a + path. Get Status returns all system defined properties for a path. Get + Access Control List returns the access control list for a path. This + operation supports conditional HTTP requests. For more information, + see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param action: Optional. If the value is "getStatus" only the system + defined properties for the path are returned. If the value is + "getAccessControl" the access control list is returned in the response + headers (Hierarchical Namespace must be enabled for the account), + otherwise the properties are returned. Possible values include: + 'getAccessControl', 'getStatus' + :type action: str or + ~azure.storage.filedatalake.models.PathGetPropertiesAction + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if action is not None: + query_parameters['action'] = self._serialize.query("action", action, 'PathGetPropertiesAction') + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), + 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), + 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), + 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), + 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{filesystem}/{path}'} + + async def delete(self, recursive=None, continuation=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Delete File | Delete Directory. + + Delete the file or directory. This operation supports conditional HTTP + requests. For more information, see [Specifying Conditional Headers + for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param recursive: Required and valid only when the resource is a + directory. If "true", all paths beneath the directory will be deleted. + If "false" and the directory is non-empty, an error occurs. + :type recursive: bool + :param continuation: Optional. When deleting a directory, the number + of paths that are deleted with each invocation is limited. If the + number of paths to be deleted exceeds this limit, a continuation token + is returned in this response header. When a continuation token is + returned in the response, it must be specified in a subsequent + invocation of the delete operation to continue deleting the directory. + :type continuation: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{filesystem}/{path}'} + + async def set_access_control(self, timeout=None, owner=None, group=None, permissions=None, acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Set the owner, group, permissions, or access control list for a path. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "setAccessControl" + + # Construct URL + url = self.set_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + return cls(response, None, response_headers) + set_access_control.metadata = {'url': '/{filesystem}/{path}'} + + async def set_access_control_recursive(self, mode, timeout=None, continuation=None, force_flag=None, max_records=None, acl=None, request_id=None, *, cls=None, **kwargs): + """Set the access control list for a path and subpaths. + + :param mode: Mode "set" sets POSIX access control rights on files and + directories, "modify" modifies one or more POSIX access control rights + that pre-exist on files and directories, "remove" removes one or more + POSIX access control rights that were present earlier on files and + directories. Possible values include: 'set', 'modify', 'remove' + :type mode: str or + ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param continuation: Optional. The number of paths processed with each + invocation is limited. If the number of paths to be processed exceeds + this limit, a continuation token is returned in the response. When a + continuation token is returned, it must be specified in a subsequent + invocation of the operation to continue applying the access control + list change. + :type continuation: str + :param force_flag: Optional. Valid for "SetAccessControlRecursive" + operation. If set to false, the operation will terminate quickly on + encountering user errors (4XX). If true, the operation will ignore + user errors and proceed with the operation on other sub-entities of + the directory. A continuation token is returned in case of user errors + only when forceFlag is true. If not set, the default value is false. + :type force_flag: bool + :param max_records: Optional. It specifies the maximum number of files + or directories on which the acl change will be applied. If omitted or + greater than 2,000, the request will process up to 2,000 items. + :type max_records: int + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: SetAccessControlRecursiveResponse or the result of + cls(response) + :rtype: + ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + action = "setAccessControlRecursive" + + # Construct URL + url = self.set_access_control_recursive.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') + if force_flag is not None: + query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} + + async def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + """Flush previously uploaded data to the file. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param position: This parameter allows the caller to upload data in + parallel and control the order in which it is appended to the file. + It is required when uploading data to be appended to the file and when + flushing previously uploaded data to the file. The value must be the + position where the data is to be appended. Uploaded data is not + immediately flushed, or written, to the file. To flush, the + previously uploaded data must be contiguous, the position parameter + must be specified and equal to the length of the file after all data + has been written, and there must not be a request entity body included + with the request. + :type position: long + :param retain_uncommitted_data: Valid only for flush operations. If + "true", uncommitted data is retained after the flush operation + completes; otherwise, the uncommitted data is deleted after the flush + operation. The default is false. Data at offsets less than the + specified position are written to the file when flush succeeds, but + this optional parameter allows data after the flush position to be + retained for a future flush operation. + :type retain_uncommitted_data: bool + :param close: Azure Storage Events allow applications to receive + notifications when files change. When Azure Storage Events are + enabled, a file changed event is raised. This event has a property + indicating whether this is the final change to distinguish the + difference between an intermediate flush to a file stream and the + final close of a file stream. The close query parameter is valid only + when the action is "flush" and change notifications are enabled. If + the value of close is "true" and the flush operation completes + successfully, the service raises a file change notification with a + property indicating that this is the final update (the file stream has + been closed). If "false", a change notification is raised indicating + the file has changed. The default is false. This query parameter is + set to true by the Hadoop ABFS driver to indicate that the file stream + has been closed. + :type close: bool + :param content_length: Required for "Append Data" and "Flush Data". + Must be 0 for "Flush Data". Must be the length of the request content + in bytes for "Append Data". + :type content_length: long + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + content_md5 = None + if path_http_headers is not None: + content_md5 = path_http_headers.content_md5 + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "flush" + + # Construct URL + url = self.flush_data.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + if 
cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + return cls(response, None, response_headers) + flush_data.metadata = {'url': '/{filesystem}/{path}'} + + async def append_data(self, body, position=None, timeout=None, content_length=None, transactional_content_crc64=None, request_id=None, path_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Append data to the file. + + :param body: Initial data + :type body: Generator + :param position: This parameter allows the caller to upload data in + parallel and control the order in which it is appended to the file. + It is required when uploading data to be appended to the file and when + flushing previously uploaded data to the file. The value must be the + position where the data is to be appended. Uploaded data is not + immediately flushed, or written, to the file. To flush, the + previously uploaded data must be contiguous, the position parameter + must be specified and equal to the length of the file after all data + has been written, and there must not be a request entity body included + with the request. 
+ :type position: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param content_length: Required for "Append Data" and "Flush Data". + Must be 0 for "Flush Data". Must be the length of the request content + in bytes for "Append Data". + :type content_length: long + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + transactional_content_hash = None + if path_http_headers is not None: + transactional_content_hash = path_http_headers.transactional_content_hash + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + action = "append" + + # Construct URL + url = self.append_data.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_hash is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", transactional_content_hash, 'bytearray') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct body + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + 
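# Append Data succeeds only with 202 Accepted; any other status is mapped to a typed storage error. +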
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + } + return cls(response, None, response_headers) + append_data.metadata = {'url': '/{filesystem}/{path}'} + + async def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, *, cls=None, **kwargs): + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Required. Indicates mode of the expiry time. + Possible values include: 'NeverExpire', 'RelativeToCreation', + 'RelativeToNow', 'Absolute' + :type expiry_options: str or + ~azure.storage.filedatalake.models.PathExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param expires_on: The time to set the blob to expiry + :type expires_on: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.set_expiry.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') + if expires_on is not None: + header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise 
models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_expiry.metadata = {'url': '/{filesystem}/{path}'}
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py
new file mode 100644
index 0000000..b4cb9c5
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py
@@ -0,0 +1,128 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class ServiceOperations:
+    """ServiceOperations async operations.
+
+    You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar resource: The value must be "account" for all account operations. Constant value: "account".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.resource = "account"
+
+    async def list_file_systems(self, prefix=None, continuation=None, max_results=None, request_id=None, timeout=None, *, cls=None, **kwargs):
+        """List FileSystems.
+
+        List filesystems and their properties in the given account.
+
+        :param prefix: Filters results to filesystems within the specified
+         prefix.
+        :type prefix: str
+        :param continuation: Optional. The number of filesystems returned
+         with each invocation is limited. If the number of filesystems to be
+         returned exceeds this limit, a continuation token is returned in the
+         x-ms-continuation response header. When a continuation token is
+         returned in the response, it must be specified in a subsequent
+         invocation of the list operation to continue listing the
+         filesystems.
+        :type continuation: str
+        :param max_results: An optional value that specifies the maximum
+         number of items to return. If omitted or greater than 5,000, the
+         response will include up to 5,000 items.
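+         For example, ``max_results=100`` (an illustrative value) caps each
+         page at 100 filesystems; the remainder is retrieved by passing back
+         the returned continuation token.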
+ :type max_results: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: FileSystemList or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.FileSystemList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.list_file_systems.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if max_results is not None: + query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('FileSystemList', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_file_systems.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/__init__.py new file mode 100644 index 0000000..18d10bd --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/__init__.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AclFailedEntry + from ._models_py3 import FileSystem + from ._models_py3 import FileSystemList + from ._models_py3 import LeaseAccessConditions + from ._models_py3 import ModifiedAccessConditions + from ._models_py3 import Path + from ._models_py3 import PathHTTPHeaders + from ._models_py3 import PathList + from ._models_py3 import SetAccessControlRecursiveResponse + from ._models_py3 import SourceModifiedAccessConditions + from ._models_py3 import StorageError, StorageErrorException + from ._models_py3 import StorageErrorError +except (SyntaxError, ImportError): + from ._models import AclFailedEntry + from ._models import FileSystem + from ._models import FileSystemList + from ._models import LeaseAccessConditions + from ._models import ModifiedAccessConditions + from ._models import Path + from ._models import PathHTTPHeaders + from ._models import PathList + from ._models import SetAccessControlRecursiveResponse + from ._models import SourceModifiedAccessConditions + from ._models import StorageError, StorageErrorException + from ._models import StorageErrorError +from ._data_lake_storage_client_enums import ( + PathExpiryOptions, + PathGetPropertiesAction, + PathLeaseAction, + PathRenameMode, + PathResourceType, + PathSetAccessControlRecursiveMode, + PathUpdateAction, +) + +__all__ = [ + 'AclFailedEntry', + 'FileSystem', + 'FileSystemList', + 'LeaseAccessConditions', + 'ModifiedAccessConditions', + 'Path', + 'PathHTTPHeaders', + 'PathList', + 'SetAccessControlRecursiveResponse', + 'SourceModifiedAccessConditions', + 'StorageError', 'StorageErrorException', + 'StorageErrorError', + 'PathSetAccessControlRecursiveMode', + 'PathExpiryOptions', + 'PathResourceType', + 'PathRenameMode', + 'PathUpdateAction', + 'PathLeaseAction', + 'PathGetPropertiesAction', +] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_data_lake_storage_client_enums.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_data_lake_storage_client_enums.py new file mode 100644 index 0000000..93d9c27 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_data_lake_storage_client_enums.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from enum import Enum + + +class PathSetAccessControlRecursiveMode(str, Enum): + + set = "set" + modify = "modify" + remove = "remove" + + +class PathExpiryOptions(str, Enum): + + never_expire = "NeverExpire" + relative_to_creation = "RelativeToCreation" + relative_to_now = "RelativeToNow" + absolute = "Absolute" + + +class PathResourceType(str, Enum): + + directory = "directory" + file = "file" + + +class PathRenameMode(str, Enum): + + legacy = "legacy" + posix = "posix" + + +class PathUpdateAction(str, Enum): + + append = "append" + flush = "flush" + set_properties = "setProperties" + set_access_control = "setAccessControl" + set_access_control_recursive = "setAccessControlRecursive" + + +class PathLeaseAction(str, Enum): + + acquire = "acquire" + break_enum = "break" + change = "change" + renew = "renew" + release = "release" + + +class PathGetPropertiesAction(str, Enum): + + get_access_control = "getAccessControl" + get_status = "getStatus" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models.py new file mode 100644 index 0000000..2f44279 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models.py @@ -0,0 +1,350 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from azure.core.exceptions import HttpResponseError + + +class AclFailedEntry(Model): + """AclFailedEntry. + + :param name: + :type name: str + :param type: + :type type: str + :param error_message: + :type error_message: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AclFailedEntry, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) + self.error_message = kwargs.get('error_message', None) + + +class FileSystem(Model): + """FileSystem. + + :param name: + :type name: str + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(FileSystem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.last_modified = kwargs.get('last_modified', None) + self.e_tag = kwargs.get('e_tag', None) + + +class FileSystemList(Model): + """FileSystemList. 
+ + :param filesystems: + :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] + """ + + _attribute_map = { + 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, + } + + def __init__(self, **kwargs): + super(FileSystemList, self).__init__(**kwargs) + self.filesystems = kwargs.get('filesystems', None) + + +class LeaseAccessConditions(Model): + """Additional parameters for a set of operations. + + :param lease_id: If specified, the operation only succeeds if the + resource's lease is active and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': '', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = kwargs.get('lease_id', None) + + +class ModifiedAccessConditions(Model): + """Additional parameters for a set of operations. + + :param if_modified_since: Specify this header value to operate only on a + blob if it has been modified since the specified date/time. + :type if_modified_since: datetime + :param if_unmodified_since: Specify this header value to operate only on a + blob if it has not been modified since the specified date/time. + :type if_unmodified_since: datetime + :param if_match: Specify an ETag value to operate only on blobs with a + matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs + without a matching value. + :type if_none_match: str + """ + + _attribute_map = { + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + + +class Path(Model): + """Path. + + :param name: + :type name: str + :param is_directory: Default value: False . + :type is_directory: bool + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + :param content_length: + :type content_length: long + :param owner: + :type owner: str + :param group: + :type group: str + :param permissions: + :type permissions: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'owner': {'key': 'owner', 'type': 'str'}, + 'group': {'key': 'group', 'type': 'str'}, + 'permissions': {'key': 'permissions', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Path, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.is_directory = kwargs.get('is_directory', False) + self.last_modified = kwargs.get('last_modified', None) + self.e_tag = kwargs.get('e_tag', None) + self.content_length = kwargs.get('content_length', None) + self.owner = kwargs.get('owner', None) + self.group = kwargs.get('group', None) + self.permissions = kwargs.get('permissions', None) + + +class PathHTTPHeaders(Model): + """Additional parameters for a set of operations, such as: Path_create, + Path_update, Path_flush_data, Path_append_data. + + :param cache_control: Optional. 
Sets the blob's cache control. If + specified, this property is stored with the blob and returned with a read + request. + :type cache_control: str + :param content_encoding: Optional. Sets the blob's content encoding. If + specified, this property is stored with the blob and returned with a read + request. + :type content_encoding: str + :param content_language: Optional. Set the blob's content language. If + specified, this property is stored with the blob and returned with a read + request. + :type content_language: str + :param content_disposition: Optional. Sets the blob's Content-Disposition + header. + :type content_disposition: str + :param content_type: Optional. Sets the blob's content type. If specified, + this property is stored with the blob and returned with a read request. + :type content_type: str + :param content_md5: Specify the transactional md5 for the body, to be + validated by the service. + :type content_md5: bytearray + :param transactional_content_hash: Specify the transactional md5 for the + body, to be validated by the service. + :type transactional_content_hash: bytearray + """ + + _attribute_map = { + 'cache_control': {'key': '', 'type': 'str'}, + 'content_encoding': {'key': '', 'type': 'str'}, + 'content_language': {'key': '', 'type': 'str'}, + 'content_disposition': {'key': '', 'type': 'str'}, + 'content_type': {'key': '', 'type': 'str'}, + 'content_md5': {'key': '', 'type': 'bytearray'}, + 'transactional_content_hash': {'key': '', 'type': 'bytearray'}, + } + + def __init__(self, **kwargs): + super(PathHTTPHeaders, self).__init__(**kwargs) + self.cache_control = kwargs.get('cache_control', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.content_language = kwargs.get('content_language', None) + self.content_disposition = kwargs.get('content_disposition', None) + self.content_type = kwargs.get('content_type', None) + self.content_md5 = kwargs.get('content_md5', None) + self.transactional_content_hash = kwargs.get('transactional_content_hash', None) + + +class PathList(Model): + """PathList. + + :param paths: + :type paths: list[~azure.storage.filedatalake.models.Path] + """ + + _attribute_map = { + 'paths': {'key': 'paths', 'type': '[Path]'}, + } + + def __init__(self, **kwargs): + super(PathList, self).__init__(**kwargs) + self.paths = kwargs.get('paths', None) + + +class SetAccessControlRecursiveResponse(Model): + """SetAccessControlRecursiveResponse. + + :param directories_successful: + :type directories_successful: int + :param files_successful: + :type files_successful: int + :param failure_count: + :type failure_count: int + :param failed_entries: + :type failed_entries: + list[~azure.storage.filedatalake.models.AclFailedEntry] + """ + + _attribute_map = { + 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, + 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, + 'failure_count': {'key': 'failureCount', 'type': 'int'}, + 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, + } + + def __init__(self, **kwargs): + super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) + self.directories_successful = kwargs.get('directories_successful', None) + self.files_successful = kwargs.get('files_successful', None) + self.failure_count = kwargs.get('failure_count', None) + self.failed_entries = kwargs.get('failed_entries', None) + + +class SourceModifiedAccessConditions(Model): + """Additional parameters for create operation. 
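+ In the generated path operations, these values are sent as the ``x-ms-source-if-*`` conditional headers and are evaluated against the rename source path.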
+
+    :param source_if_match: Specify an ETag value to operate only on blobs
+     with a matching value.
+    :type source_if_match: str
+    :param source_if_none_match: Specify an ETag value to operate only on
+     blobs without a matching value.
+    :type source_if_none_match: str
+    :param source_if_modified_since: Specify this header value to operate only
+     on a blob if it has been modified since the specified date/time.
+    :type source_if_modified_since: datetime
+    :param source_if_unmodified_since: Specify this header value to operate
+     only on a blob if it has not been modified since the specified date/time.
+    :type source_if_unmodified_since: datetime
+    """
+
+    _attribute_map = {
+        'source_if_match': {'key': '', 'type': 'str'},
+        'source_if_none_match': {'key': '', 'type': 'str'},
+        'source_if_modified_since': {'key': '', 'type': 'rfc-1123'},
+        'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
+    }
+
+    def __init__(self, **kwargs):
+        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+        self.source_if_match = kwargs.get('source_if_match', None)
+        self.source_if_none_match = kwargs.get('source_if_none_match', None)
+        self.source_if_modified_since = kwargs.get('source_if_modified_since', None)
+        self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None)
+
+
+class StorageError(Model):
+    """StorageError.
+
+    :param error: The service error response object.
+    :type error: ~azure.storage.filedatalake.models.StorageErrorError
+    """
+
+    _attribute_map = {
+        'error': {'key': 'error', 'type': 'StorageErrorError'},
+    }
+
+    def __init__(self, **kwargs):
+        super(StorageError, self).__init__(**kwargs)
+        self.error = kwargs.get('error', None)
+
+
+class StorageErrorException(HttpResponseError):
+    """Server responded with an exception of type: 'StorageError'.
+
+    :param deserialize: A deserializer
+    :param response: Server response to be deserialized.
+    """
+
+    def __init__(self, response, deserialize, *args):
+
+        model_name = 'StorageError'
+        self.error = deserialize(model_name, response)
+        if self.error is None:
+            self.error = deserialize.dependencies[model_name]()
+        super(StorageErrorException, self).__init__(response=response)
+
+
+class StorageErrorError(Model):
+    """The service error response object.
+
+    :param code: The service error code.
+    :type code: str
+    :param message: The service error message.
+    :type message: str
+    """
+
+    _attribute_map = {
+        'code': {'key': 'Code', 'type': 'str'},
+        'message': {'key': 'Message', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(StorageErrorError, self).__init__(**kwargs)
+        self.code = kwargs.get('code', None)
+        self.message = kwargs.get('message', None)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models_py3.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models_py3.py
new file mode 100644
index 0000000..3ca8d84
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models_py3.py
@@ -0,0 +1,350 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from azure.core.exceptions import HttpResponseError + + +class AclFailedEntry(Model): + """AclFailedEntry. + + :param name: + :type name: str + :param type: + :type type: str + :param error_message: + :type error_message: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, type: str=None, error_message: str=None, **kwargs) -> None: + super(AclFailedEntry, self).__init__(**kwargs) + self.name = name + self.type = type + self.error_message = error_message + + +class FileSystem(Model): + """FileSystem. + + :param name: + :type name: str + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, last_modified: str=None, e_tag: str=None, **kwargs) -> None: + super(FileSystem, self).__init__(**kwargs) + self.name = name + self.last_modified = last_modified + self.e_tag = e_tag + + +class FileSystemList(Model): + """FileSystemList. + + :param filesystems: + :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] + """ + + _attribute_map = { + 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, + } + + def __init__(self, *, filesystems=None, **kwargs) -> None: + super(FileSystemList, self).__init__(**kwargs) + self.filesystems = filesystems + + +class LeaseAccessConditions(Model): + """Additional parameters for a set of operations. + + :param lease_id: If specified, the operation only succeeds if the + resource's lease is active and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': '', 'type': 'str'}, + } + + def __init__(self, *, lease_id: str=None, **kwargs) -> None: + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = lease_id + + +class ModifiedAccessConditions(Model): + """Additional parameters for a set of operations. + + :param if_modified_since: Specify this header value to operate only on a + blob if it has been modified since the specified date/time. + :type if_modified_since: datetime + :param if_unmodified_since: Specify this header value to operate only on a + blob if it has not been modified since the specified date/time. + :type if_unmodified_since: datetime + :param if_match: Specify an ETag value to operate only on blobs with a + matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs + without a matching value. + :type if_none_match: str + """ + + _attribute_map = { + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + } + + def __init__(self, *, if_modified_since=None, if_unmodified_since=None, if_match: str=None, if_none_match: str=None, **kwargs) -> None: + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since + self.if_match = if_match + self.if_none_match = if_none_match + + +class Path(Model): + """Path. 
+ + :param name: + :type name: str + :param is_directory: Default value: False . + :type is_directory: bool + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + :param content_length: + :type content_length: long + :param owner: + :type owner: str + :param group: + :type group: str + :param permissions: + :type permissions: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'owner': {'key': 'owner', 'type': 'str'}, + 'group': {'key': 'group', 'type': 'str'}, + 'permissions': {'key': 'permissions', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, is_directory: bool=False, last_modified: str=None, e_tag: str=None, content_length: int=None, owner: str=None, group: str=None, permissions: str=None, **kwargs) -> None: + super(Path, self).__init__(**kwargs) + self.name = name + self.is_directory = is_directory + self.last_modified = last_modified + self.e_tag = e_tag + self.content_length = content_length + self.owner = owner + self.group = group + self.permissions = permissions + + +class PathHTTPHeaders(Model): + """Additional parameters for a set of operations, such as: Path_create, + Path_update, Path_flush_data, Path_append_data. + + :param cache_control: Optional. Sets the blob's cache control. If + specified, this property is stored with the blob and returned with a read + request. + :type cache_control: str + :param content_encoding: Optional. Sets the blob's content encoding. If + specified, this property is stored with the blob and returned with a read + request. + :type content_encoding: str + :param content_language: Optional. Set the blob's content language. If + specified, this property is stored with the blob and returned with a read + request. + :type content_language: str + :param content_disposition: Optional. Sets the blob's Content-Disposition + header. + :type content_disposition: str + :param content_type: Optional. Sets the blob's content type. If specified, + this property is stored with the blob and returned with a read request. + :type content_type: str + :param content_md5: Specify the transactional md5 for the body, to be + validated by the service. + :type content_md5: bytearray + :param transactional_content_hash: Specify the transactional md5 for the + body, to be validated by the service. 
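+ For example, ``hashlib.md5(body).digest()`` computed client-side yields a value the service recomputes over the received payload, rejecting the request on mismatch (an illustrative use of this field).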
+ :type transactional_content_hash: bytearray + """ + + _attribute_map = { + 'cache_control': {'key': '', 'type': 'str'}, + 'content_encoding': {'key': '', 'type': 'str'}, + 'content_language': {'key': '', 'type': 'str'}, + 'content_disposition': {'key': '', 'type': 'str'}, + 'content_type': {'key': '', 'type': 'str'}, + 'content_md5': {'key': '', 'type': 'bytearray'}, + 'transactional_content_hash': {'key': '', 'type': 'bytearray'}, + } + + def __init__(self, *, cache_control: str=None, content_encoding: str=None, content_language: str=None, content_disposition: str=None, content_type: str=None, content_md5: bytearray=None, transactional_content_hash: bytearray=None, **kwargs) -> None: + super(PathHTTPHeaders, self).__init__(**kwargs) + self.cache_control = cache_control + self.content_encoding = content_encoding + self.content_language = content_language + self.content_disposition = content_disposition + self.content_type = content_type + self.content_md5 = content_md5 + self.transactional_content_hash = transactional_content_hash + + +class PathList(Model): + """PathList. + + :param paths: + :type paths: list[~azure.storage.filedatalake.models.Path] + """ + + _attribute_map = { + 'paths': {'key': 'paths', 'type': '[Path]'}, + } + + def __init__(self, *, paths=None, **kwargs) -> None: + super(PathList, self).__init__(**kwargs) + self.paths = paths + + +class SetAccessControlRecursiveResponse(Model): + """SetAccessControlRecursiveResponse. + + :param directories_successful: + :type directories_successful: int + :param files_successful: + :type files_successful: int + :param failure_count: + :type failure_count: int + :param failed_entries: + :type failed_entries: + list[~azure.storage.filedatalake.models.AclFailedEntry] + """ + + _attribute_map = { + 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, + 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, + 'failure_count': {'key': 'failureCount', 'type': 'int'}, + 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, + } + + def __init__(self, *, directories_successful: int=None, files_successful: int=None, failure_count: int=None, failed_entries=None, **kwargs) -> None: + super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) + self.directories_successful = directories_successful + self.files_successful = files_successful + self.failure_count = failure_count + self.failed_entries = failed_entries + + +class SourceModifiedAccessConditions(Model): + """Additional parameters for create operation. + + :param source_if_match: Specify an ETag value to operate only on blobs + with a matching value. + :type source_if_match: str + :param source_if_none_match: Specify an ETag value to operate only on + blobs without a matching value. + :type source_if_none_match: str + :param source_if_modified_since: Specify this header value to operate only + on a blob if it has been modified since the specified date/time. + :type source_if_modified_since: datetime + :param source_if_unmodified_since: Specify this header value to operate + only on a blob if it has not been modified since the specified date/time. 
+    :type source_if_unmodified_since: datetime
+    """
+
+    _attribute_map = {
+        'source_if_match': {'key': '', 'type': 'str'},
+        'source_if_none_match': {'key': '', 'type': 'str'},
+        'source_if_modified_since': {'key': '', 'type': 'rfc-1123'},
+        'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
+    }
+
+    def __init__(self, *, source_if_match: str=None, source_if_none_match: str=None, source_if_modified_since=None, source_if_unmodified_since=None, **kwargs) -> None:
+        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+        self.source_if_match = source_if_match
+        self.source_if_none_match = source_if_none_match
+        self.source_if_modified_since = source_if_modified_since
+        self.source_if_unmodified_since = source_if_unmodified_since
+
+
+class StorageError(Model):
+    """StorageError.
+
+    :param error: The service error response object.
+    :type error: ~azure.storage.filedatalake.models.StorageErrorError
+    """
+
+    _attribute_map = {
+        'error': {'key': 'error', 'type': 'StorageErrorError'},
+    }
+
+    def __init__(self, *, error=None, **kwargs) -> None:
+        super(StorageError, self).__init__(**kwargs)
+        self.error = error
+
+
+class StorageErrorException(HttpResponseError):
+    """Server responded with an exception of type: 'StorageError'.
+
+    :param deserialize: A deserializer
+    :param response: Server response to be deserialized.
+    """
+
+    def __init__(self, response, deserialize, *args):
+
+        model_name = 'StorageError'
+        self.error = deserialize(model_name, response)
+        if self.error is None:
+            self.error = deserialize.dependencies[model_name]()
+        super(StorageErrorException, self).__init__(response=response)
+
+
+class StorageErrorError(Model):
+    """The service error response object.
+
+    :param code: The service error code.
+    :type code: str
+    :param message: The service error message.
+    :type message: str
+    """
+
+    _attribute_map = {
+        'code': {'key': 'Code', 'type': 'str'},
+        'message': {'key': 'Message', 'type': 'str'},
+    }
+
+    def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None:
+        super(StorageErrorError, self).__init__(**kwargs)
+        self.code = code
+        self.message = message
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/__init__.py
new file mode 100644
index 0000000..9efa6df
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/__init__.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._file_system_operations import FileSystemOperations +from ._path_operations import PathOperations + +__all__ = [ + 'ServiceOperations', + 'FileSystemOperations', + 'PathOperations', +] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_file_system_operations.py new file mode 100644 index 0000000..b0d17ff --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_file_system_operations.py @@ -0,0 +1,462 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. import models + + +class FileSystemOperations(object): + """FileSystemOperations operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + + def create(self, properties=None, request_id=None, timeout=None, cls=None, **kwargs): + """Create FileSystem. + + Create a FileSystem rooted at the specified location. If the FileSystem + already exists, the operation fails. This operation does not support + conditional HTTP requests. + + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed. All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. + :type properties: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{filesystem}'} + + def set_properties(self, properties=None, request_id=None, timeout=None, modified_access_conditions=None, cls=None, **kwargs): + """Set FileSystem Properties. + + Set properties for the FileSystem. This operation supports conditional + HTTP requests. For more information, see [Specifying Conditional + Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed. All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. 
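+ For example, ``env=ZGV2`` sets a property named ``env``; ``ZGV2`` is the base64 encoding of the value "dev" (an illustrative pair, not generated text).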
+ :type properties: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.set_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_properties.metadata = {'url': '/{filesystem}'} + + def get_properties(self, request_id=None, timeout=None, cls=None, **kwargs): + """Get FileSystem Properties. 
+ + All system and user-defined filesystem properties are specified in the + response headers. + + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{filesystem}'} + + def delete(self, request_id=None, timeout=None, modified_access_conditions=None, cls=None, **kwargs): + """Delete FileSystem. + + Marks the FileSystem for deletion. When a FileSystem is deleted, a + FileSystem with the same identifier cannot be created for at least 30 + seconds. While the filesystem is being deleted, attempts to create a + filesystem with the same identifier will fail with status code 409 + (Conflict), with the service returning additional error information + indicating that the filesystem is being deleted. All other operations, + including operations on any files or directories within the filesystem, + will fail with status code 404 (Not Found) while the filesystem is + being deleted. This operation supports conditional HTTP requests. 
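A deletion can thus be guarded: for instance, sending ``If-Unmodified-Since`` makes the service answer 412 (Precondition Failed) if the filesystem was modified after that time.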
For + more information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{filesystem}'} + + def list_paths(self, recursive, continuation=None, path=None, max_results=None, upn=None, request_id=None, timeout=None, cls=None, **kwargs): + """List Paths. + + List FileSystem paths and their properties. 
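+
+ A minimal usage sketch (hypothetical names: ``fs_ops`` stands for this
+ ``FileSystemOperations`` instance as attached to a configured client):
+
+ .. code-block:: python
+
+     path_list = fs_ops.list_paths(recursive=True, max_results=100)
+     for p in path_list.paths or []:
+         print(p.name, p.is_directory, p.content_length)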
+ + :param recursive: Required + :type recursive: bool + :param continuation: Optional. When deleting a directory, the number + of paths that are deleted with each invocation is limited. If the + number of paths to be deleted exceeds this limit, a continuation token + is returned in this response header. When a continuation token is + returned in the response, it must be specified in a subsequent + invocation of the delete operation to continue deleting the directory. + :type continuation: str + :param path: Optional. Filters results to paths within the specified + directory. An error occurs if the directory does not exist. + :type path: str + :param max_results: An optional value that specifies the maximum + number of items to return. If omitted or greater than 5,000, the + response will include up to 5,000 items. + :type max_results: int + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: PathList or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.PathList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.list_paths.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if path is not None: + query_parameters['directory'] = self._serialize.query("path", path, 'str') + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + if max_results is not None: + query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + 
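# The client's HTTP pipeline applies its configured policies (retry, authentication, logging) and then sends the request. +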
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('PathList', response)
+            header_dict = {
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    list_paths.metadata = {'url': '/{filesystem}'}
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_path_operations.py
new file mode 100644
index 0000000..9ed6157
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_path_operations.py
@@ -0,0 +1,1697 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class PathOperations(object):
+    """PathOperations operations.
+
+    You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar comp: Constant value: "expiry".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.comp = "expiry"
+
+    def create(self, resource=None, continuation=None, mode=None, rename_source=None, source_lease_id=None, properties=None, permissions=None, umask=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
+        """Create File | Create Directory | Rename File | Rename Directory.
+
+        Create or rename a file or directory. By default, the destination is
+        overwritten and, if the destination already exists and has a lease,
+        the lease is broken. This operation supports conditional HTTP requests.
+        For more information, see [Specifying Conditional Headers for Blob
+        Service
+        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+        To fail if the destination already exists, use a conditional request
+        with If-None-Match: "*".
+
+        :param resource: Required only for Create File and Create Directory.
+         The value must be "file" or "directory". Possible values include:
+         'directory', 'file'
+        :type resource: str or
+         ~azure.storage.filedatalake.models.PathResourceType
+        :param continuation: Optional. When renaming a directory, the number
+         of paths that are renamed with each invocation is limited. If the
+         number of paths to be renamed exceeds this limit, a continuation token
+         is returned in this response header. When a continuation token is
+         returned in the response, it must be specified in a subsequent
+         invocation of the rename operation to continue renaming the directory.
+        :type continuation: str
+        :param mode: Optional. Valid only when namespace is enabled. This
+         parameter determines the behavior of the rename operation. The value
+         must be "legacy" or "posix", and the default value will be "posix".
+         Possible values include: 'legacy', 'posix'
+        :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+        :param rename_source: An optional file or directory to be renamed.
+         The value must have the following format: "/{filesystem}/{path}". If
+         "x-ms-properties" is specified, the properties will overwrite the
+         existing properties; otherwise, the existing properties will be
+         preserved. This value must be a URL percent-encoded string. Note that
+         the string may only contain ASCII characters in the ISO-8859-1
+         character set.
+        :type rename_source: str
+        :param source_lease_id: A lease ID for the source path. If specified,
+         the source path must have an active lease and the lease ID must match.
+        :type source_lease_id: str
+        :param properties: Optional. User-defined properties to be stored with
+         the filesystem, in the format of a comma-separated list of name and
+         value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
+         string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set. If the filesystem exists, any properties
+         not included in the list will be removed. All properties are removed
+         if the header is omitted. To merge new and existing properties, first
+         get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all
+         properties.
+        :type properties: str
+        :param permissions: Optional and only valid if Hierarchical Namespace
+         is enabled for the account. Sets POSIX access permissions for the file
+         owner, the file owning group, and others. Each class may be granted
+         read, write, or execute permission. The sticky bit is also supported.
+         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+         supported.
+        :type permissions: str
+        :param umask: Optional and only valid if Hierarchical Namespace is
+         enabled for the account. When creating a file or directory and the
+         parent folder does not have a default ACL, the umask restricts the
+         permissions of the file or directory to be created. The resulting
+         permission is given by p bitwise-AND NOT u (p & ~u), where p is the
+         permission and u is the umask. For example, if p is 0777 and u is
+         0057, then the resulting permission is 0720. The default permission
+         is 0777 for a directory and 0666 for a file.
The default umask is 0027. The umask + must be specified in 4-digit octal notation (e.g. 0766). + :type umask: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.filedatalake.models.SourceModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + source_if_match = None + if source_modified_access_conditions is not None: + source_if_match = source_modified_access_conditions.source_if_match + source_if_none_match = None + if source_modified_access_conditions is not None: + source_if_none_match = source_modified_access_conditions.source_if_none_match + source_if_modified_since = None + if source_modified_access_conditions is not None: + source_if_modified_since = source_modified_access_conditions.source_if_modified_since + source_if_unmodified_since = None + if source_modified_access_conditions is not None: + source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + 
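+        # Note: only explicitly supplied parameters are added to the query string
+        # below; enum-typed values (resource, mode) are passed to the serializer
+        # with their declared enum type names for validation.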
query_parameters = {} + if resource is not None: + query_parameters['resource'] = self._serialize.query("resource", resource, 'PathResourceType') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if mode is not None: + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathRenameMode') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if rename_source is not None: + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') + if source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') + if source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') + if source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') + + # 
Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{filesystem}/{path}'} + + def update(self, action, mode, body, max_records=None, continuation=None, force_flag=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Append Data | Flush Data | Set Properties | Set Access Control. + + Uploads data to be appended to a file, flushes (writes) previously + uploaded data to a file, sets properties for a file or directory, or + sets access control for a file or directory. Data can only be appended + to a file. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param action: The action must be "append" to upload data to be + appended to a file, "flush" to flush previously uploaded data to a + file, "setProperties" to set the properties of a file or directory, + "setAccessControl" to set the owner, group, permissions, or access + control list for a file or directory, or "setAccessControlRecursive" + to set the access control list for a directory recursively. Note that + Hierarchical Namespace must be enabled for the account in order to use + access control. Also note that the Access Control List (ACL) includes + permissions for the owner, owning group, and others, so the + x-ms-permissions and x-ms-acl request headers are mutually exclusive. + Possible values include: 'append', 'flush', 'setProperties', + 'setAccessControl', 'setAccessControlRecursive' + :type action: str or + ~azure.storage.filedatalake.models.PathUpdateAction + :param mode: Mode "set" sets POSIX access control rights on files and + directories, "modify" modifies one or more POSIX access control rights + that pre-exist on files and directories, "remove" removes one or more + POSIX access control rights that were present earlier on files and + directories. 
Possible values include: 'set', 'modify', 'remove'
+        :type mode: str or
+         ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param body: Initial data
+        :type body: Generator
+        :param max_records: Optional. Valid for "SetAccessControlRecursive"
+         operation. It specifies the maximum number of files or directories on
+         which the acl change will be applied. If omitted or greater than
+         2,000, the request will process up to 2,000 items
+        :type max_records: int
+        :param continuation: Optional. The number of paths processed with each
+         invocation is limited. If the number of paths to be processed exceeds
+         this limit, a continuation token is returned in the response header
+         x-ms-continuation. When a continuation token is returned in the
+         response, it must be percent-encoded and specified in a subsequent
+         invocation of the setAccessControlRecursive operation.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive"
+         operation. If set to false, the operation will terminate quickly on
+         encountering user errors (4XX). If true, the operation will ignore
+         user errors and proceed with the operation on other sub-entities of
+         the directory. A continuation token is returned only when forceFlag
+         is true and user errors occur. If not set, the default value is
+         false.
+        :type force_flag: bool
+        :param position: This parameter allows the caller to upload data in
+         parallel and control the order in which it is appended to the file.
+         It is required when uploading data to be appended to the file and when
+         flushing previously uploaded data to the file. The value must be the
+         position where the data is to be appended. Uploaded data is not
+         immediately flushed, or written, to the file. To flush, the
+         previously uploaded data must be contiguous, the position parameter
+         must be specified and equal to the length of the file after all data
+         has been written, and there must not be a request entity body included
+         with the request.
+        :type position: long
+        :param retain_uncommitted_data: Valid only for flush operations. If
+         "true", uncommitted data is retained after the flush operation
+         completes; otherwise, the uncommitted data is deleted after the flush
+         operation. The default is false. Data at offsets less than the
+         specified position are written to the file when flush succeeds, but
+         this optional parameter allows data after the flush position to be
+         retained for a future flush operation.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive
+         notifications when files change. When Azure Storage Events are
+         enabled, a file changed event is raised. This event has a property
+         indicating whether this is the final change to distinguish the
+         difference between an intermediate flush to a file stream and the
+         final close of a file stream. The close query parameter is valid only
+         when the action is "flush" and change notifications are enabled. If
+         the value of close is "true" and the flush operation completes
+         successfully, the service raises a file change notification with a
+         property indicating that this is the final update (the file stream has
+         been closed). If "false" a change notification is raised indicating
+         the file has changed. The default is false. This query parameter is
+         set to true by the Hadoop ABFS driver to indicate that the file stream
+         has been closed.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".
+ Must be 0 for "Flush Data". Must be the length of the request content + in bytes for "Append Data". + :type content_length: long + :param properties: Optional. User-defined properties to be stored with + the filesystem, in the format of a comma-separated list of name and + value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded + string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties + not included in the list will be removed. All properties are removed + if the header is omitted. To merge new and existing properties, first + get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all + properties. + :type properties: str + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: SetAccessControlRecursiveResponse or the result of + cls(response) + :rtype: + ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + content_md5 = None + if path_http_headers is not None: + content_md5 = path_http_headers.content_md5 + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') + if force_flag is not None: + query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct 
headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/octet-stream' + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + if cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 
'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + update.metadata = {'url': '/{filesystem}/{path}'} + + def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Lease Path. + + Create and manage a lease to restrict write and delete access to the + path. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param x_ms_lease_action: There are five lease actions: "acquire", + "break", "change", "renew", and "release". Use "acquire" and specify + the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a + new lease. Use "break" to break an existing lease. When a lease is + broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the + file. When a lease is successfully broken, the response indicates the + interval in seconds until a new lease can be acquired. Use "change" + and specify the current lease ID in "x-ms-lease-id" and the new lease + ID in "x-ms-proposed-lease-id" to change the lease ID of an active + lease. Use "renew" and specify the "x-ms-lease-id" to renew an + existing lease. Use "release" and specify the "x-ms-lease-id" to + release a lease. Possible values include: 'acquire', 'break', + 'change', 'renew', 'release' + :type x_ms_lease_action: str or + ~azure.storage.filedatalake.models.PathLeaseAction + :param x_ms_lease_duration: The lease duration is required to acquire + a lease, and specifies the duration of the lease in seconds. The + lease duration must be between 15 and 60 seconds or -1 for infinite + lease. + :type x_ms_lease_duration: int + :param x_ms_lease_break_period: The lease break period duration is + optional to break a lease, and specifies the break period of the + lease in seconds. The lease break duration must be between 0 and 60 + seconds. + :type x_ms_lease_break_period: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. 
+ The Blob service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'PathLeaseAction') + if x_ms_lease_duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int') + if x_ms_lease_break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-lease-time': self._deserialize('str', response.headers.get('x-ms-lease-time')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + lease.metadata = {'url': '/{filesystem}/{path}'} + + def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Read File. + + Read the contents of a file. For read operations, range requests are + supported. This operation supports conditional HTTP requests. For more + information, see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param range: The HTTP Range request header specifies one or more byte + ranges of the resource to be retrieved. + :type range: str + :param x_ms_range_get_content_md5: Optional. When this header is set + to "true" and specified together with the Range header, the service + returns the MD5 hash for the range, as long as the range is less than + or equal to 4MB in size. If this header is specified without the Range + header, the service returns status code 400 (Bad Request). If this + header is set to true when the range exceeds 4 MB in size, the service + returns status code 400 (Bad Request). + :type x_ms_range_get_content_md5: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.read.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if range is not None: + header_parameters['Range'] = self._serialize.header("range", range, 'str') + if x_ms_range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + 
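+            # stream_download returns a lazy byte iterator; the body is pulled
+            # through the pipeline as the caller consumes it rather than being
+            # buffered here.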
header_dict = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), + 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), + 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize('str', 
response.headers.get('x-ms-lease-status')), + 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + read.metadata = {'url': '/{filesystem}/{path}'} + + def get_properties(self, action=None, upn=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Get Properties | Get Status | Get Access Control List. + + Get Properties returns all system and user defined properties for a + path. Get Status returns all system defined properties for a path. Get + Access Control List returns the access control list for a path. This + operation supports conditional HTTP requests. For more information, + see [Specifying Conditional Headers for Blob Service + Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). + + :param action: Optional. If the value is "getStatus" only the system + defined properties for the path are returned. If the value is + "getAccessControl" the access control list is returned in the response + headers (Hierarchical Namespace must be enabled for the account), + otherwise the properties are returned. Possible values include: + 'getAccessControl', 'getStatus' + :type action: str or + ~azure.storage.filedatalake.models.PathGetPropertiesAction + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if action is not None: + query_parameters['action'] = self._serialize.query("action", action, 'PathGetPropertiesAction') + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')),
+                'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+                'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+                'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+                'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+                'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
+                'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
+                'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
+                'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
+                'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
+                'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
+                'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
+                'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
+                'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    get_properties.metadata = {'url': '/{filesystem}/{path}'}
+
+    def delete(self, recursive=None, continuation=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+        """Delete File | Delete Directory.
+
+        Delete the file or directory. This operation supports conditional HTTP
+        requests. For more information, see [Specifying Conditional Headers
+        for Blob Service
+        Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+
+        :param recursive: Required and valid only when the resource is a
+         directory. If "true", all paths beneath the directory will be
+         deleted. If "false" and the directory is non-empty, an error occurs.
+        :type recursive: bool
+        :param continuation: Optional. When deleting a directory, the number
+         of paths that are deleted with each invocation is limited. If the
+         number of paths to be deleted exceeds this limit, a continuation token
+         is returned in this response header. When a continuation token is
+         returned in the response, it must be specified in a subsequent
+         invocation of the delete operation to continue deleting the directory.
+        :type continuation: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{filesystem}/{path}'} + + def set_access_control(self, timeout=None, owner=None, group=None, permissions=None, acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + """Set the owner, group, permissions, or access control list for a path. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "setAccessControl" + + # Construct URL + url = self.set_access_control.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) 
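+            # NOTE: map_error raises a more specific exception only for status
+            # codes registered in the optional error_map; anything unmapped
+            # falls through to the generic StorageErrorException below.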
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+            }
+            return cls(response, None, response_headers)
+    set_access_control.metadata = {'url': '/{filesystem}/{path}'}
+
+    def set_access_control_recursive(self, mode, timeout=None, continuation=None, force_flag=None, max_records=None, acl=None, request_id=None, cls=None, **kwargs):
+        """Set the access control list for a path and subpaths.
+
+        :param mode: Mode "set" sets POSIX access control rights on files and
+         directories, "modify" modifies one or more POSIX access control rights
+         that pre-exist on files and directories, "remove" removes one or more
+         POSIX access control rights that were present earlier on files and
+         directories. Possible values include: 'set', 'modify', 'remove'
+        :type mode: str or
+         ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param continuation: Optional. The number of paths processed with each
+         invocation is limited. If the number of paths to be processed exceeds
+         this limit, a continuation token is returned in the response. When a
+         continuation token is returned, it must be specified in a subsequent
+         invocation of the set access control recursive operation to continue
+         applying the access control list changes to the directory.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive"
+         operation. If set to false, the operation will terminate quickly on
+         encountering user errors (4XX). If true, the operation will ignore
+         user errors and proceed with the operation on other sub-entities of
+         the directory. Continuation token will only be returned when forceFlag
+         is true in case of user errors. If not set the default value is false
+         for this.
+        :type force_flag: bool
+        :param max_records: Optional. It specifies the maximum number of files
+         or directories on which the acl change will be applied. If omitted or
+         greater than 2,000, the request will process up to 2,000 items
+        :type max_records: int
+        :param acl: Sets POSIX access control rights on files and directories.
+         The value is a comma-separated list of access control entries. Each
+         access control entry (ACE) consists of a scope, a type, a user or
+         group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: SetAccessControlRecursiveResponse or the result of
+         cls(response)
+        :rtype:
+         ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        action = "setAccessControlRecursive"
+
+        # Construct URL
+        url = self.set_access_control_recursive.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if continuation is not None:
+            query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+        query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode')
+        if force_flag is not None:
+            query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool')
+        if max_records is not None:
+            query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1)
+        query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        if acl is not None:
+            header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.patch(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('SetAccessControlRecursiveResponse', response)
+            header_dict = {
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'}
+
+    def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+        """Flush previously uploaded data to the file.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param position: This parameter allows the caller to upload data in
+         parallel and control the order in which it is appended to the file.
+         It is required when uploading data to be appended to the file and when
+         flushing previously uploaded data to the file. The value must be the
+         position where the data is to be appended. Uploaded data is not
+         immediately flushed, or written, to the file. To flush, the
+         previously uploaded data must be contiguous, the position parameter
+         must be specified and equal to the length of the file after all data
+         has been written, and there must not be a request entity body included
+         with the request.
+        :type position: long
+        :param retain_uncommitted_data: Valid only for flush operations. If
+         "true", uncommitted data is retained after the flush operation
+         completes; otherwise, the uncommitted data is deleted after the flush
+         operation. The default is false. Data at offsets less than the
+         specified position are written to the file when flush succeeds, but
+         this optional parameter allows data after the flush position to be
+         retained for a future flush operation.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive
+         notifications when files change. When Azure Storage Events are
+         enabled, a file changed event is raised. This event has a property
+         indicating whether this is the final change to distinguish the
+         difference between an intermediate flush to a file stream and the
+         final close of a file stream. The close query parameter is valid only
+         when the action is "flush" and change notifications are enabled. If
+         the value of close is "true" and the flush operation completes
+         successfully, the service raises a file change notification with a
+         property indicating that this is the final update (the file stream has
+         been closed). If "false" a change notification is raised indicating
+         the file has changed. The default is false. This query parameter is
+         set to true by the Hadoop ABFS driver to indicate that the file stream
+         has been closed.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".
+         Must be 0 for "Flush Data". Must be the length of the request content
+         in bytes for "Append Data".
+        :type content_length: long
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+ :type request_id: str + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + content_md5 = None + if path_http_headers is not None: + content_md5 = path_http_headers.content_md5 + cache_control = None + if path_http_headers is not None: + cache_control = path_http_headers.cache_control + content_type = None + if path_http_headers is not None: + content_type = path_http_headers.content_type + content_disposition = None + if path_http_headers is not None: + content_disposition = path_http_headers.content_disposition + content_encoding = None + if path_http_headers is not None: + content_encoding = path_http_headers.content_encoding + content_language = None + if path_http_headers is not None: + content_language = path_http_headers.content_language + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + + action = "flush" + + # Construct URL + url = self.flush_data.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + if 
cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') + if content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') + if content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') + if content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') + if content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + return cls(response, None, response_headers) + flush_data.metadata = {'url': '/{filesystem}/{path}'} + + def append_data(self, body, position=None, timeout=None, content_length=None, transactional_content_crc64=None, request_id=None, path_http_headers=None, lease_access_conditions=None, cls=None, **kwargs): + """Append data to the file. + + :param body: Initial data + :type body: Generator + :param position: This parameter allows the caller to upload data in + parallel and control the order in which it is appended to the file. + It is required when uploading data to be appended to the file and when + flushing previously uploaded data to the file. The value must be the + position where the data is to be appended. Uploaded data is not + immediately flushed, or written, to the file. To flush, the + previously uploaded data must be contiguous, the position parameter + must be specified and equal to the length of the file after all data + has been written, and there must not be a request entity body included + with the request. 
+ :type position: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param content_length: Required for "Append Data" and "Flush Data". + Must be 0 for "Flush Data". Must be the length of the request content + in bytes for "Append Data". + :type content_length: long + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param path_http_headers: Additional parameters for the operation + :type path_http_headers: + ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.filedatalake.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + transactional_content_hash = None + if path_http_headers is not None: + transactional_content_hash = path_http_headers.transactional_content_hash + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + action = "append" + + # Construct URL + url = self.append_data.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_hash is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", transactional_content_hash, 'bytearray') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct body + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + 
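# NOTE: Append Data returns 202 (Accepted) on success: appended bytes are only staged at the given position and become readable once a subsequent flush_data call commits them; any other status code is surfaced as a storage error here. +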
map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+                'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+                'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+            }
+            return cls(response, None, response_headers)
+    append_data.metadata = {'url': '/{filesystem}/{path}'}
+
+    def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, cls=None, **kwargs):
+        """Sets the time a blob will expire and be deleted.
+
+        :param expiry_options: Required. Indicates mode of the expiry time.
+         Possible values include: 'NeverExpire', 'RelativeToCreation',
+         'RelativeToNow', 'Absolute'
+        :type expiry_options: str or
+         ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for Blob Service Operations.
+        :type timeout: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param expires_on: The time to set the blob to expire.
+        :type expires_on: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        # Construct URL
+        url = self.set_expiry.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+        header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str')
+        if expires_on is not None:
+            header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response,
self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_expiry.metadata = {'url': '/{filesystem}/{path}'}
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_service_operations.py
new file mode 100644
index 0000000..540079a
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_service_operations.py
@@ -0,0 +1,128 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class ServiceOperations(object):
+    """ServiceOperations operations.
+
+    You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar resource: The value must be "account" for all account operations. Constant value: "account".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.resource = "account"
+
+    def list_file_systems(self, prefix=None, continuation=None, max_results=None, request_id=None, timeout=None, cls=None, **kwargs):
+        """List FileSystems.
+
+        List filesystems and their properties in the given account.
+
+        :param prefix: Filters results to filesystems within the specified
+         prefix.
+        :type prefix: str
+        :param continuation: Optional. The number of filesystems returned with
+         each invocation is limited. If the number of filesystems to be
+         returned exceeds this limit, a continuation token is returned in the
+         response. When a continuation token is returned, it must be specified
+         in a subsequent invocation of the list operation to continue listing
+         the filesystems.
+        :type continuation: str
+        :param max_results: An optional value that specifies the maximum
+         number of items to return. If omitted or greater than 5,000, the
+         response will include up to 5,000 items.
+ :type max_results: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: FileSystemList or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.FileSystemList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.list_file_systems.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if max_results is not None: + query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('FileSystemList', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_file_systems.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/version.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/version.py new file mode 100644 index 0000000..6ef707d --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright 
(c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+VERSION = "2020-02-10"
+
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_list_paths_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_list_paths_helper.py
new file mode 100644
index 0000000..1e4b19e
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_list_paths_helper.py
@@ -0,0 +1,73 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from azure.core.paging import PageIterator
+from ._generated.models import StorageErrorException
+from ._models import PathProperties
+from ._deserialize import return_headers_and_deserialized_path_list
+from ._generated.models import Path
+from ._shared.response_handlers import process_storage_error
+
+
+class PathPropertiesPaged(PageIterator):
+    """An Iterable of Path properties.
+
+    :ivar str path: Filters the results to return only paths under the specified path.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results.
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str path: Filters the results to return only paths under the specified path.
+    :param int max_results: The maximum number of paths to retrieve per
+     call.
+    :param str continuation_token: An opaque continuation token.
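+
+    Example (an illustrative sketch, not part of the generated surface; it
+    assumes a ``FileSystemClient`` named ``fs_client``, since this pager is
+    normally obtained through ``FileSystemClient.get_paths`` rather than
+    constructed directly):
+
+    .. code-block:: python
+
+        for path in fs_client.get_paths(path="my-dir", recursive=True):
+            print(path.name, path.is_directory)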
+ """ + def __init__( + self, command, + recursive, + path=None, + max_results=None, + continuation_token=None, + upn=None): + super(PathPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.recursive = recursive + self.results_per_page = max_results + self.path = path + self.upn = upn + self.current_page = None + self.path_list = None + + def _get_next_cb(self, continuation_token): + try: + return self._command( + self.recursive, + continuation=continuation_token or None, + path=self.path, + max_results=self.results_per_page, + upn=self.upn, + cls=return_headers_and_deserialized_path_list) + except StorageErrorException as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.path_list, self._response = get_next_return + self.current_page = [self._build_item(item) for item in self.path_list] + + return self._response['continuation'] or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, PathProperties): + return item + if isinstance(item, Path): + path = PathProperties._from_generated(item) # pylint: disable=protected-access + return path + return item diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_models.py new file mode 100644 index 0000000..0a87afb --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_models.py @@ -0,0 +1,837 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines +from enum import Enum + +from azure.multiapi.storagev2.blob.v2019_07_07 import LeaseProperties as BlobLeaseProperties +from azure.multiapi.storagev2.blob.v2019_07_07 import AccountSasPermissions as BlobAccountSasPermissions +from azure.multiapi.storagev2.blob.v2019_07_07 import ResourceTypes as BlobResourceTypes +from azure.multiapi.storagev2.blob.v2019_07_07 import UserDelegationKey as BlobUserDelegationKey +from azure.multiapi.storagev2.blob.v2019_07_07 import ContentSettings as BlobContentSettings +from azure.multiapi.storagev2.blob.v2019_07_07 import AccessPolicy as BlobAccessPolicy +from azure.multiapi.storagev2.blob.v2019_07_07 import DelimitedTextDialect as BlobDelimitedTextDialect +from azure.multiapi.storagev2.blob.v2019_07_07 import DelimitedJsonDialect as BlobDelimitedJSON +from azure.multiapi.storagev2.blob.v2019_07_07 import ArrowDialect as BlobArrowDialect +from azure.multiapi.storagev2.blob.v2019_07_07._models import ContainerPropertiesPaged +from ._shared.models import DictMixin + + +class FileSystemProperties(object): + """File System properties class. + + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the file system was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar ~azure.storage.filedatalake.LeaseProperties lease: + Stores all the lease information for the file system. + :ivar str public_access: Specifies whether data in the file system may be accessed + publicly and the level of access. 
+ :ivar bool has_immutability_policy: + Represents whether the file system has an immutability policy. + :ivar bool has_legal_hold: + Represents whether the file system has a legal hold. + :ivar dict metadata: A dict with name-value pairs to associate with the + file system as metadata. + + Returned ``FileSystemProperties`` instances expose these values through a + dictionary interface, for example: ``file_system_props["last_modified"]``. + Additionally, the file system name is available as ``file_system_props["name"]``. + """ + + def __init__(self): + self.name = None + self.last_modified = None + self.etag = None + self.lease = None + self.public_access = None + self.has_immutability_policy = None + self.has_legal_hold = None + self.metadata = None + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.last_modified = generated.properties.last_modified + props.etag = generated.properties.etag + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access + generated.properties.public_access) + props.has_immutability_policy = generated.properties.has_immutability_policy + props.has_legal_hold = generated.properties.has_legal_hold + props.metadata = generated.metadata + return props + + @classmethod + def _convert_from_container_props(cls, container_properties): + container_properties.__class__ = cls + container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access + container_properties.public_access) + container_properties.lease.__class__ = LeaseProperties + return container_properties + + +class FileSystemPropertiesPaged(ContainerPropertiesPaged): + """An Iterable of File System properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file system name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only file systems whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of file system names to retrieve per + call. + :param str continuation_token: An opaque continuation token. + """ + + def __init__(self, *args, **kwargs): + super(FileSystemPropertiesPaged, self).__init__( + *args, + **kwargs + ) + + @staticmethod + def _build_item(item): + return FileSystemProperties._from_generated(item) # pylint: disable=protected-access + + +class DirectoryProperties(DictMixin): + """ + :ivar str name: name of the directory + :ivar str etag: The ETag contains a value that you can use to perform operations + conditionally. + :ivar bool deleted: if the current directory marked as deleted + :ivar dict metadata: Name-value pairs associated with the directory as metadata. + :ivar ~azure.storage.filedatalake.LeaseProperties lease: + Stores all the lease information for the directory. 
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the directory was modified.
+    :ivar ~datetime.datetime creation_time:
+        Indicates when the directory was created, in UTC.
+    :ivar int remaining_retention_days: The number of days that the directory will be retained
+        before being permanently deleted by the service.
+    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
+        The content settings of the directory.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('name')
+        self.etag = kwargs.get('ETag')
+        self.deleted = None
+        self.metadata = kwargs.get('metadata')
+        self.lease = LeaseProperties(**kwargs)
+        self.last_modified = kwargs.get('Last-Modified')
+        self.creation_time = kwargs.get('x-ms-creation-time')
+        self.deleted_time = None
+        self.remaining_retention_days = None
+
+
+class FileProperties(DictMixin):
+    """
+    :ivar str name: name of the file
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar bool deleted: if the current file is marked as deleted
+    :ivar dict metadata: Name-value pairs associated with the file as metadata.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the file.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the file was modified.
+    :ivar ~datetime.datetime creation_time:
+        Indicates when the file was created, in UTC.
+    :ivar int size: size of the file
+    :ivar int remaining_retention_days: The number of days that the file will be retained
+        before being permanently deleted by the service.
+    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
+        The content settings of the file.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('name')
+        self.etag = kwargs.get('ETag')
+        self.deleted = None
+        self.metadata = kwargs.get('metadata')
+        self.lease = LeaseProperties(**kwargs)
+        self.last_modified = kwargs.get('Last-Modified')
+        self.creation_time = kwargs.get('x-ms-creation-time')
+        self.size = kwargs.get('Content-Length')
+        self.deleted_time = None
+        self.expiry_time = kwargs.get("x-ms-expiry-time")
+        self.remaining_retention_days = None
+        self.content_settings = ContentSettings(**kwargs)
+
+
+class PathProperties(object):
+    """Path properties listed by the get_paths api.
+
+    :ivar str name: The full path for a file or directory.
+    :ivar str owner: The owner of the file or directory.
+    :ivar str group: The owning group of the file or directory.
+    :ivar str permissions: Sets POSIX access permissions for the file
+        owner, the file owning group, and others. Each class may be granted
+        read, write, or execute permission. The sticky bit is also supported.
+        Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+        supported.
+    :ivar datetime last_modified: A datetime object representing the last time the directory/file was modified.
+    :ivar bool is_directory: Whether the path is a directory.
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar int content_length: The size of the file if the path is a file.
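+
+    Example (an illustrative sketch; ``paths`` is assumed to be the iterator
+    of ``PathProperties`` returned by ``FileSystemClient.get_paths``):
+
+    .. code-block:: python
+
+        for p in paths:
+            if not p.is_directory:
+                print(p.name, p.content_length)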
+ """ + + def __init__(self, **kwargs): + super(PathProperties, self).__init__( + **kwargs + ) + self.name = kwargs.pop('name', None) + self.owner = kwargs.get('owner', None) + self.group = kwargs.get('group', None) + self.permissions = kwargs.get('permissions', None) + self.last_modified = kwargs.get('last_modified', None) + self.is_directory = kwargs.get('is_directory', False) + self.etag = kwargs.get('etag', None) + self.content_length = kwargs.get('content_length', None) + + @classmethod + def _from_generated(cls, generated): + path_prop = PathProperties() + path_prop.name = generated.name + path_prop.owner = generated.owner + path_prop.group = generated.group + path_prop.permissions = generated.permissions + path_prop.last_modified = generated.last_modified + path_prop.is_directory = bool(generated.is_directory) + path_prop.etag = generated.additional_properties.get('etag') + path_prop.content_length = generated.content_length + return path_prop + + +class LeaseProperties(BlobLeaseProperties): + """DataLake Lease Properties. + + :ivar str status: + The lease status of the file. Possible values: locked|unlocked + :ivar str state: + Lease state of the file. Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a file is leased, specifies whether the lease is of infinite or fixed duration. + """ + + +class ContentSettings(BlobContentSettings): + """The content settings of a file or directory. + + :ivar str content_type: + The content type specified for the file or directory. If no content type was + specified, the default content type is application/octet-stream. + :ivar str content_encoding: + If the content_encoding has previously been set + for the file, that value is stored. + :ivar str content_language: + If the content_language has previously been set + for the file, that value is stored. + :ivar str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the file, that value is stored. + :ivar str cache_control: + If the cache_control has previously been set for + the file, that value is stored. + :ivar str content_md5: + If the content_md5 has been set for the file, this response + header is stored so that the client can check for message content + integrity. + :keyword str content_type: + The content type specified for the file or directory. If no content type was + specified, the default content type is application/octet-stream. + :keyword str content_encoding: + If the content_encoding has previously been set + for the file, that value is stored. + :keyword str content_language: + If the content_language has previously been set + for the file, that value is stored. + :keyword str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the file, that value is stored. + :keyword str cache_control: + If the cache_control has previously been set for + the file, that value is stored. + :keyword str content_md5: + If the content_md5 has been set for the file, this response + header is stored so that the client can check for message content + integrity. 
+ """ + + def __init__( + self, **kwargs): + super(ContentSettings, self).__init__( + **kwargs + ) + + +class AccountSasPermissions(BlobAccountSasPermissions): + def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin + create=False): + super(AccountSasPermissions, self).__init__( + read=read, create=create, write=write, list=list, + delete=delete + ) + + +class FileSystemSasPermissions(object): + """FileSystemSasPermissions class to be used with the + :func:`~azure.storage.filedatalake.generate_file_system_sas` function. + + :param bool read: + Read the content, properties, metadata etc. + :param bool write: + Create or write content, properties, metadata. Lease the file system. + :param bool delete: + Delete the file system. + :param bool list: + List paths in the file system. + :keyword bool move: + Move any file in the directory to a new location. + Note the move operation can optionally be restricted to the child file or directory owner or + the parent directory owner if the saoid parameter is included in the token and the sticky bit is set + on the parent directory. + :keyword bool execute: + Get the status (system defined properties) and ACL of any file in the directory. + If the caller is the owner, set access control on any file in the directory. + :keyword bool manage_ownership: + Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory + within a folder that has the sticky bit set. + :keyword bool manage_access_control: + Allows the user to set permissions and POSIX ACLs on files and directories. + """ + + def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin + **kwargs): + self.read = read + self.write = write + self.delete = delete + self.list = list + self.move = kwargs.pop('move', None) + self.execute = kwargs.pop('execute', None) + self.manage_ownership = kwargs.pop('manage_ownership', None) + self.manage_access_control = kwargs.pop('manage_access_control', None) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('l' if self.list else '') + + ('m' if self.move else '') + + ('e' if self.execute else '') + + ('o' if self.manage_ownership else '') + + ('p' if self.manage_access_control else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create a FileSystemSasPermissions from a string. + + To specify read, write, or delete permissions you need only to + include the first letter of the word in the string. E.g. For read and + write permissions, you would provide a string "rw". + + :param str permission: The string which dictates the read, add, create, + write, or delete permissions. 
+        :return: A FileSystemSasPermissions object
+        :rtype: ~azure.storage.filedatalake.FileSystemSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, write=p_write, delete=p_delete,
+                     list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class DirectorySasPermissions(object):
+    """DirectorySasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_directory_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc.
+    :param bool create:
+        Create a new directory.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the directory.
+    :param bool delete:
+        Delete the directory.
+    :keyword bool list:
+        List any files in the directory. Implies Execute.
+    :keyword bool move:
+        Move any file in the directory to a new location.
+        Note the move operation can optionally be restricted to the child file or directory owner or
+        the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+        on the parent directory.
+    :keyword bool execute:
+        Get the status (system defined properties) and ACL of any file in the directory.
+        If the caller is the owner, set access control on any file in the directory.
+    :keyword bool manage_ownership:
+        Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+        within a folder that has the sticky bit set.
+    :keyword bool manage_access_control:
+        Allows the user to set permissions and POSIX ACLs on files and directories.
+    """
+
+    def __init__(self, read=False, create=False, write=False,
+                 delete=False, **kwargs):
+        self.read = read
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.list = kwargs.pop('list', None)
+        self.move = kwargs.pop('move', None)
+        self.execute = kwargs.pop('execute', None)
+        self.manage_ownership = kwargs.pop('manage_ownership', None)
+        self.manage_access_control = kwargs.pop('manage_access_control', None)
+        self._str = (('r' if self.read else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('l' if self.list else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('o' if self.manage_ownership else '') +
+                     ('p' if self.manage_access_control else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a DirectorySasPermissions from a string.
+
+        To specify read, create, write, or delete permissions you need only
+        include the first letter of the word in the string. E.g. for read and
+        write permissions, you would provide a string "rw".
+
+        :param str permission: The string which dictates the read, create,
+            write, or delete permissions.
+        :return: A DirectorySasPermissions object
+        :rtype: ~azure.storage.filedatalake.DirectorySasPermissions
+        """
+        p_read = 'r' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
+                     list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class FileSasPermissions(object):
+    """FileSasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_file_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc. Use the file as
+        the source of a read operation.
+    :param bool create:
+        Write a new file.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the file.
+    :param bool delete:
+        Delete the file.
+    :keyword bool move:
+        Move any file in the directory to a new location.
+        Note the move operation can optionally be restricted to the child file or directory owner or
+        the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+        on the parent directory.
+    :keyword bool execute:
+        Get the status (system defined properties) and ACL of any file in the directory.
+        If the caller is the owner, set access control on any file in the directory.
+    :keyword bool manage_ownership:
+        Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+        within a folder that has the sticky bit set.
+    :keyword bool manage_access_control:
+        Allows the user to set permissions and POSIX ACLs on files and directories.
+    """
+
+    def __init__(self, read=False, create=False, write=False, delete=False, **kwargs):
+        self.read = read
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.move = kwargs.pop('move', None)
+        self.execute = kwargs.pop('execute', None)
+        self.manage_ownership = kwargs.pop('manage_ownership', None)
+        self.manage_access_control = kwargs.pop('manage_access_control', None)
+        self._str = (('r' if self.read else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('o' if self.manage_ownership else '') +
+                     ('p' if self.manage_access_control else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a FileSasPermissions from a string.
+
+        To specify read, write, or delete permissions you need only
+        include the first letter of the word in the string. E.g. for read and
+        write permissions, you would provide a string "rw".
+
+        :param str permission: The string which dictates the read, create,
+            write, or delete permissions.
+        :return: A FileSasPermissions object
+        :rtype: ~azure.storage.filedatalake.FileSasPermissions
+        """
+        p_read = 'r' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
+                     move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class AccessPolicy(BlobAccessPolicy):
+    """Access Policy class used by the set and get access policy methods in each service.
+
+    A stored access policy can specify the start time, expiry time, and
+    permissions for the Shared Access Signatures with which it's associated.
+    Depending on how you want to control access to your resource, you can
+    specify all of these parameters within the stored access policy, and omit
+    them from the URL for the Shared Access Signature. Doing so permits you to
+    modify the associated signature's behavior at any time, as well as to revoke
+    it. Or you can specify one or more of the access policy parameters within
+    the stored access policy, and the others on the URL. Finally, you can
+    specify all of the parameters on the URL. In this case, you can use the
+    stored access policy to revoke the signature, but not to modify its behavior.
+
+    Together the Shared Access Signature and the stored access policy must
+    include all fields required to authenticate the signature. If any required
+    fields are missing, the request will fail. Likewise, if a field is specified
+    both in the Shared Access Signature URL and in the stored access policy, the
+    request will fail with status code 400 (Bad Request).
+
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :paramtype start: ~datetime.datetime or str
+    """
+
+    def __init__(self, permission=None, expiry=None, **kwargs):
+        super(AccessPolicy, self).__init__(
+            permission=permission, expiry=expiry, start=kwargs.pop('start', None)
+        )
+
+
+class ResourceTypes(BlobResourceTypes):
+    """
+    Specifies the resource types that are accessible with the account SAS.
+
+    :param bool service:
+        Access to service-level APIs (e.g. List File Systems)
+    :param bool file_system:
+        Access to file_system-level APIs (e.g., Create/Delete file system,
+        List Directories/Files)
+    :param bool object:
+        Access to object-level APIs for
+        files (e.g. Create File, etc.)
+    """
+
+    def __init__(self, service=False, file_system=False, object=False  # pylint: disable=redefined-builtin
+                 ):
+        super(ResourceTypes, self).__init__(service=service, container=file_system, object=object)
+
+
+class UserDelegationKey(BlobUserDelegationKey):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
+    """
+
+    @classmethod
+    def _from_generated(cls, generated):
+        delegation_key = cls()
+        delegation_key.signed_oid = generated.signed_oid
+        delegation_key.signed_tid = generated.signed_tid
+        delegation_key.signed_start = generated.signed_start
+        delegation_key.signed_expiry = generated.signed_expiry
+        delegation_key.signed_service = generated.signed_service
+        delegation_key.signed_version = generated.signed_version
+        delegation_key.value = generated.value
+        return delegation_key
+
+
+class PublicAccess(str, Enum):
+    """
+    Specifies whether data in the file system may be accessed publicly and the level of access.
+    """
+
+    File = 'blob'
+    """
+    Specifies public read access for files. File data within this file system can be read
+    via anonymous request, but file system data is not available. Clients cannot enumerate
+    files within the file system via anonymous request.
+    """
+
+    FileSystem = 'container'
+    """
+    Specifies full public read access for file system and file data. Clients can enumerate
+    files within the file system via anonymous request, but cannot enumerate file systems
+    within the storage account.
+    """
+
+    @classmethod
+    def _from_generated(cls, public_access):
+        if public_access == "blob":  # pylint:disable=no-else-return
+            return cls.File
+        elif public_access == "container":
+            return cls.FileSystem
+
+        return None
+
+
+class LocationMode(object):
+    """
+    Specifies the location the request should be sent to. This mode only applies
+    for RA-GRS accounts which allow secondary read access. All other account types
+    must use PRIMARY.
+    """
+
+    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
+    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
+
+
+class DelimitedJsonDialect(BlobDelimitedJSON):
+    """Defines the input or output JSON serialization for a datalake query.
+
+    :keyword str delimiter: The line separator character, default value is '\n'
+    """
+
+
+class DelimitedTextDialect(BlobDelimitedTextDialect):
+    """Defines the input or output delimited (CSV) serialization for a datalake query request.
+
+    :keyword str delimiter:
+        Column separator, defaults to ','.
+    :keyword str quotechar:
+        Field quote, defaults to '"'.
+    :keyword str lineterminator:
+        Record separator, defaults to '\n'.
+    :keyword str escapechar:
+        Escape char, defaults to empty.
+    :keyword bool has_header:
+        Whether the blob data includes headers in the first line. The default value is False, meaning that the
+        data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
+        of the first line.
+    """
+
+
+class ArrowDialect(BlobArrowDialect):
+    """A field of an Arrow schema.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param str type: Required.
+    :keyword str name: The name of the field.
+    :keyword int precision: The precision of the field.
+    :keyword int scale: The scale of the field.
+    """
+
+
+class ArrowType(str, Enum):
+
+    INT64 = "int64"
+    BOOL = "bool"
+    TIMESTAMP_MS = "timestamp[ms]"
+    STRING = "string"
+    DOUBLE = "double"
+    DECIMAL = "decimal"
+
+
+class DataLakeFileQueryError(object):
+    """The error that occurred during a quick query operation.
+
+    :ivar str error:
+        The name of the error.
+    :ivar bool is_fatal:
+        If true, this error prevents further query processing. More result data may be returned,
+        but there is no guarantee that all of the original data will be processed.
+        If false, this error does not prevent further query processing.
+    :ivar str description:
+        A description of the error.
+    :ivar int position:
+        The file offset at which the error occurred.
+    """
+
+    def __init__(self, error=None, is_fatal=False, description=None, position=None):
+        self.error = error
+        self.is_fatal = is_fatal
+        self.description = description
+        self.position = position
+
+
+class AccessControlChangeCounters(DictMixin):
+    """
+    AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively.
+
+    :ivar int directories_successful:
+        Number of directories where the Access Control List has been updated successfully.
+    :ivar int files_successful:
+        Number of files where the Access Control List has been updated successfully.
+    :ivar int failure_count:
+        Number of paths where the Access Control List update has failed.
+    """
+
+    def __init__(self, directories_successful, files_successful, failure_count):
+        self.directories_successful = directories_successful
+        self.files_successful = files_successful
+        self.failure_count = failure_count
+
+
+class AccessControlChangeResult(DictMixin):
+    """
+    AccessControlChangeResult contains the result of operations that change Access Control Lists recursively.
+
+    :ivar ~azure.storage.filedatalake.AccessControlChangeCounters counters:
+        Contains counts of paths changed from the start of the operation.
+    :ivar str continuation:
+        Optional continuation token.
+        Value is present when the operation is split into multiple batches and can be used to resume progress.
+    """
+
+    def __init__(self, counters, continuation):
+        self.counters = counters
+        self.continuation = continuation
+
+
+class AccessControlChangeFailure(DictMixin):
+    """
+    Represents an entry that failed to update its Access Control List.
+
+    :ivar str name:
+        Name of the entry.
+    :ivar bool is_directory:
+        Indicates whether the entry is a directory.
+    :ivar str error_message:
+        Indicates the reason why the entry failed to update.
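+
+    A minimal sketch (illustrative only, not part of the SDK surface) of reading
+    these failure entries from an ``AccessControlChanges`` instance passed to a
+    ``progress_hook`` callback::
+
+        def progress_hook(changes):
+            # each entry carries the attributes documented above
+            for failure in changes.batch_failures:
+                print(failure.name, failure.is_directory, failure.error_message)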
+ """ + + def __init__(self, name, is_directory, error_message): + self.name = name + self.is_directory = is_directory + self.error_message = error_message + + +class AccessControlChanges(DictMixin): + """ + AccessControlChanges contains batch and cumulative counts of operations + that change Access Control Lists recursively. + Additionally it exposes path entries that failed to update while these operations progress. + + :ivar ~azure.storage.filedatalake.AccessControlChangeCounters batch_counters: + Contains counts of paths changed within single batch. + :ivar ~azure.storage.filedatalake.AccessControlChangeCounters aggregate_counters: + Contains counts of paths changed from start of the operation. + :ivar list(~azure.storage.filedatalake.AccessControlChangeFailure) batch_failures: + List of path entries that failed to update Access Control List within single batch. + :ivar str continuation: + An opaque continuation token that may be used to resume the operations in case of failures. + """ + + def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation): + self.batch_counters = batch_counters + self.aggregate_counters = aggregate_counters + self.batch_failures = batch_failures + self.continuation = continuation + + +class DataLakeAclChangeFailedError(Exception): + """The error happened during set/update/remove acl recursive operation. + + :ivar ~azure.core.exceptions.AzureError error: + The exception. + :ivar str description: + A description of the error. + :ivar str continuation: + An opaque continuation token that may be used to resume the operations in case of failures. + """ + + def __init__(self, error, description, continuation): + self.error = error + self.description = description + self.continuation = continuation diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_path_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_path_client.py new file mode 100644 index 0000000..9562e60 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_path_client.py @@ -0,0 +1,872 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +try: + from urllib.parse import urlparse, quote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote # type: ignore + +import six + +from azure.core.exceptions import AzureError +from azure.multiapi.storagev2.blob.v2019_07_07 import BlobClient +from ._data_lake_lease import DataLakeLeaseClient +from ._deserialize import process_storage_error +from ._generated import DataLakeStorageClient +from ._generated.models import StorageErrorException +from ._models import LocationMode, DirectoryProperties, AccessControlChangeResult, AccessControlChanges, \ + AccessControlChangeCounters, AccessControlChangeFailure, DataLakeAclChangeFailedError +from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \ + get_path_http_headers, add_metadata_headers, get_lease_id, get_source_mod_conditions, get_access_conditions +from ._shared.base_client import StorageAccountHostsMixin, parse_query +from ._shared.response_handlers import return_response_headers, return_headers_and_deserialized + +_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( + 'The require_encryption flag is set, but encryption is not supported' + ' for this method.') + + +class PathClient(StorageAccountHostsMixin): + def __init__( + self, account_url, # type: str + file_system_name, # type: str + path_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + + # remove the preceding/trailing delimiter from the path components + file_system_name = file_system_name.strip('/') + + # the name of root directory is / + if path_name != '/': + path_name = path_name.strip('/') + + if not (file_system_name and path_name): + raise ValueError("Please specify a file system name and file path.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + blob_account_url = convert_dfs_url_to_blob_url(account_url) + self._blob_account_url = blob_account_url + + datalake_hosts = kwargs.pop('_hosts', None) + blob_hosts = None + if datalake_hosts: + blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) + blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} + self._blob_client = BlobClient(blob_account_url, file_system_name, path_name, + credential=credential, _hosts=blob_hosts, **kwargs) + + _, sas_token = parse_query(parsed_url.query) + self.file_system_name = file_system_name + self.path_name = path_name + + self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) + + super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, + _hosts=datalake_hosts, **kwargs) + # ADLS doesn't support secondary endpoint, make sure it's empty + self._hosts[LocationMode.SECONDARY] = "" + self._client = DataLakeStorageClient(self.url, file_system_name, path_name, pipeline=self._pipeline) + self._datalake_client_for_blob_operation = DataLakeStorageClient(self._blob_client.url, + file_system_name, path_name, + pipeline=self._pipeline) + + def __exit__(self, *args): + self._blob_client.close() + super(PathClient, self).__exit__(*args) + + def close(self): + # type: () -> None + """ This method is to close the 
sockets opened by the client. + It need not be used when using with a context manager. + """ + self._blob_client.close() + self.__exit__() + + def _format_url(self, hostname): + file_system_name = self.file_system_name + if isinstance(file_system_name, six.text_type): + file_system_name = file_system_name.encode('UTF-8') + return "{}://{}/{}/{}{}".format( + self.scheme, + hostname, + quote(file_system_name), + quote(self.path_name, safe='~'), + self._query_str) + + def _create_path_options(self, resource_type, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_mod_conditions(kwargs) + + path_http_headers = None + if content_settings: + path_http_headers = get_path_http_headers(content_settings) + + options = { + 'resource': resource_type, + 'properties': add_metadata_headers(metadata), + 'permissions': kwargs.pop('permissions', None), + 'umask': kwargs.pop('umask', None), + 'path_http_headers': path_http_headers, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_response_headers} + options.update(kwargs) + return options + + def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ + Create directory or file + + :param resource_type: + Required for Create File and Create Directory. + The value must be "file" or "directory". Possible values include: + 'directory', 'file' + :type resource_type: str + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :param metadata: + Name-value pairs associated with the file/directory as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file/directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: Dict[str, Union[str, datetime]]
+        """
+        options = self._create_path_options(
+            resource_type,
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return self._client.path.create(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _delete_path_options(**kwargs):
+        # type: (**Any) -> Dict[str, Any]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'recursive': True,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None)}
+        options.update(kwargs)
+        return options
+
+    def _delete(self, **kwargs):
+        # type: (**Any) -> None
+        """
+        Marks the specified path for deletion.
+
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: None
+        """
+        options = self._delete_path_options(**kwargs)
+        try:
+            return self._client.path.delete(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs):
+        # type: (Optional[str], Optional[str], Optional[str], Optional[str], **Any) -> Dict[str, Any]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'owner': owner,
+            'group': group,
+            'permissions': permissions,
+            'acl': acl,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    def set_access_control(self, owner=None,  # type: Optional[str]
+                           group=None,  # type: Optional[str]
+                           permissions=None,  # type: Optional[str]
+                           acl=None,  # type: Optional[str]
+                           **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Set the owner, group, permissions, or access control list for a path.
+
+        :param owner:
+            Optional. The owner of the file or directory.
+        :type owner: str
+        :param group:
+            Optional. The owning group of the file or directory.
+        :type group: str
+        :param permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+            permissions and acl are mutually exclusive.
+        :type permissions: str
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+            permissions and acl are mutually exclusive.
+        :type acl: str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: response dict (Etag and last modified).
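+
+        A minimal usage sketch (illustrative only; ``client`` is assumed to be an
+        already-constructed file or directory client that exposes this method)::
+
+            # Symbolic POSIX permissions: owner rwx, group r-x, others no access.
+            response = client.set_access_control(permissions='rwxr-x---')
+            etag = response.get('etag')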
+ """ + if not any([owner, group, permissions, acl]): + raise ValueError("At least one parameter should be set for set_access_control API") + options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) + try: + return self._client.path.set_access_control(**options) + except StorageErrorException as error: + process_storage_error(error) + + @staticmethod + def _get_access_control_options(upn=None, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Any] + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_mod_conditions(kwargs) + + options = { + 'action': 'getAccessControl', + 'upn': upn if upn else False, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_response_headers} + options.update(kwargs) + return options + + def get_access_control(self, upn=None, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Any] + """ + :param upn: Optional. + Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :keyword lease: + Required if the file/directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword: response dict. 
+ """ + options = self._get_access_control_options(upn=upn, **kwargs) + try: + return self._client.path.get_properties(**options) + except StorageErrorException as error: + process_storage_error(error) + + @staticmethod + def _set_access_control_recursive_options(mode, acl, **kwargs): + # type: (str, str, **Any) -> Dict[str, Any] + + options = { + 'mode': mode, + 'force_flag': kwargs.pop('continue_on_failure', None), + 'timeout': kwargs.pop('timeout', None), + 'continuation': kwargs.pop('continuation', None), + 'max_records': kwargs.pop('batch_size', None), + 'acl': acl, + 'cls': return_headers_and_deserialized} + options.update(kwargs) + return options + + def set_access_control_recursive(self, + acl, + **kwargs): + # type: (str, **Any) -> AccessControlChangeResult + """ + Sets the Access Control on a path and sub-paths. + + :param acl: + Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: + Callback where the caller can track progress of the operation + as well as collect paths that failed to change Access Control. + :keyword str continuation: + Optional continuation token that can be used to resume previously stopped operation. + :keyword int batch_size: + Optional. If data set size exceeds batch size then operation will be split into multiple + requests so that progress can be tracked. Batch size should be between 1 and 2000. + The default when unspecified is 2000. + :keyword int max_batches: + Optional. Defines maximum number of batches that single change Access Control operation can execute. + If maximum is reached before all sub-paths are processed, + then continuation token can be used to resume operation. + Empty value indicates that maximum number of batches in unbound and operation continues till end. + :keyword bool continue_on_failure: + If set to False, the operation will terminate quickly on encountering user errors (4XX). + If True, the operation will ignore user errors and proceed with the operation on other sub-entities of + the directory. + Continuation token will only be returned when continue_on_failure is True in case of user errors. + If not set the default value is False for this. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: A summary of the recursive operations, including the count of successes and failures, + as well as a continuation token in case the operation was terminated prematurely. + :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` + """ + if not acl: + raise ValueError("The Access Control List must be set for this operation") + + progress_hook = kwargs.pop('progress_hook', None) + max_batches = kwargs.pop('max_batches', None) + options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs) + return self._set_access_control_internal(options=options, progress_hook=progress_hook, + max_batches=max_batches) + + def update_access_control_recursive(self, + acl, + **kwargs): + # type: (str, **Any) -> AccessControlChangeResult + """ + Modifies the Access Control on a path and sub-paths. + + :param acl: + Modifies POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. 
Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: + Callback where the caller can track progress of the operation + as well as collect paths that failed to change Access Control. + :keyword str continuation: + Optional continuation token that can be used to resume previously stopped operation. + :keyword int batch_size: + Optional. If data set size exceeds batch size then operation will be split into multiple + requests so that progress can be tracked. Batch size should be between 1 and 2000. + The default when unspecified is 2000. + :keyword int max_batches: + Optional. Defines maximum number of batches that single change Access Control operation can execute. + If maximum is reached before all sub-paths are processed, + then continuation token can be used to resume operation. + Empty value indicates that maximum number of batches in unbound and operation continues till end. + :keyword bool continue_on_failure: + If set to False, the operation will terminate quickly on encountering user errors (4XX). + If True, the operation will ignore user errors and proceed with the operation on other sub-entities of + the directory. + Continuation token will only be returned when continue_on_failure is True in case of user errors. + If not set the default value is False for this. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: A summary of the recursive operations, including the count of successes and failures, + as well as a continuation token in case the operation was terminated prematurely. + :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` + """ + if not acl: + raise ValueError("The Access Control List must be set for this operation") + + progress_hook = kwargs.pop('progress_hook', None) + max_batches = kwargs.pop('max_batches', None) + options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs) + return self._set_access_control_internal(options=options, progress_hook=progress_hook, + max_batches=max_batches) + + def remove_access_control_recursive(self, + acl, + **kwargs): + # type: (str, **Any) -> AccessControlChangeResult + """ + Removes the Access Control on a path and sub-paths. + + :param acl: + Removes POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, and a user or + group identifier in the format "[scope:][type]:[id]". + :type acl: str + :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: + Callback where the caller can track progress of the operation + as well as collect paths that failed to change Access Control. + :keyword str continuation: + Optional continuation token that can be used to resume previously stopped operation. + :keyword int batch_size: + Optional. If data set size exceeds batch size then operation will be split into multiple + requests so that progress can be tracked. Batch size should be between 1 and 2000. + The default when unspecified is 2000. + :keyword int max_batches: + Optional. Defines maximum number of batches that single change Access Control operation can execute. + If maximum is reached before all sub-paths are processed then, + continuation token can be used to resume operation. 
+ Empty value indicates that maximum number of batches in unbound and operation continues till end. + :keyword bool continue_on_failure: + If set to False, the operation will terminate quickly on encountering user errors (4XX). + If True, the operation will ignore user errors and proceed with the operation on other sub-entities of + the directory. + Continuation token will only be returned when continue_on_failure is True in case of user errors. + If not set the default value is False for this. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: A summary of the recursive operations, including the count of successes and failures, + as well as a continuation token in case the operation was terminated prematurely. + :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` + """ + if not acl: + raise ValueError("The Access Control List must be set for this operation") + + progress_hook = kwargs.pop('progress_hook', None) + max_batches = kwargs.pop('max_batches', None) + options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs) + return self._set_access_control_internal(options=options, progress_hook=progress_hook, + max_batches=max_batches) + + def _set_access_control_internal(self, options, progress_hook, max_batches=None): + try: + continue_on_failure = options.get('force_flag') + total_directories_successful = 0 + total_files_success = 0 + total_failure_count = 0 + batch_count = 0 + last_continuation_token = None + current_continuation_token = None + continue_operation = True + while continue_operation: + headers, resp = self._client.path.set_access_control_recursive(**options) + + # make a running tally so that we can report the final results + total_directories_successful += resp.directories_successful + total_files_success += resp.files_successful + total_failure_count += resp.failure_count + batch_count += 1 + current_continuation_token = headers['continuation'] + + if current_continuation_token is not None: + last_continuation_token = current_continuation_token + + if progress_hook is not None: + progress_hook(AccessControlChanges( + batch_counters=AccessControlChangeCounters( + directories_successful=resp.directories_successful, + files_successful=resp.files_successful, + failure_count=resp.failure_count, + ), + aggregate_counters=AccessControlChangeCounters( + directories_successful=total_directories_successful, + files_successful=total_files_success, + failure_count=total_failure_count, + ), + batch_failures=[AccessControlChangeFailure( + name=failure.name, + is_directory=failure.type == 'DIRECTORY', + error_message=failure.error_message) for failure in resp.failed_entries], + continuation=last_continuation_token)) + + # update the continuation token, if there are more operations that cannot be completed in a single call + max_batches_satisfied = (max_batches is not None and batch_count == max_batches) + continue_operation = bool(current_continuation_token) and not max_batches_satisfied + options['continuation'] = current_continuation_token + + # currently the service stops on any failure, so we should send back the last continuation token + # for the user to retry the failed updates + # otherwise we should just return what the service gave us + return AccessControlChangeResult(counters=AccessControlChangeCounters( + directories_successful=total_directories_successful, + files_successful=total_files_success, + failure_count=total_failure_count), + continuation=last_continuation_token + if total_failure_count 
> 0 and not continue_on_failure else current_continuation_token) + except AzureError as error: + raise DataLakeAclChangeFailedError(error, error.message, last_continuation_token) + + def _rename_path_options(self, rename_source, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None): + raise ValueError("metadata, permissions, umask is not supported for this operation") + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + source_lease_id = get_lease_id(kwargs.pop('source_lease', None)) + mod_conditions = get_mod_conditions(kwargs) + source_mod_conditions = get_source_mod_conditions(kwargs) + + path_http_headers = None + if content_settings: + path_http_headers = get_path_http_headers(content_settings) + + options = { + 'rename_source': rename_source, + 'path_http_headers': path_http_headers, + 'lease_access_conditions': access_conditions, + 'source_lease_id': source_lease_id, + 'modified_access_conditions': mod_conditions, + 'source_modified_access_conditions': source_mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'mode': 'legacy', + 'cls': return_response_headers} + options.update(kwargs) + return options + + def _rename_path(self, rename_source, + **kwargs): + # type: (**Any) -> Dict[str, Any] + """ + Rename directory or file + + :param rename_source: + The value must have the following format: "/{filesystem}/{path}". + :type rename_source: str + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword source_lease: + A lease ID for the source path. If specified, + the source path must have an active lease and the leaase ID must + match. + :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword lease: + Required if the file/directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        """
+        options = self._rename_path_options(
+            rename_source,
+            **kwargs)
+        try:
+            return self._client.path.create(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def _get_path_properties(self, **kwargs):
+        # type: (**Any) -> Union[FileProperties, DirectoryProperties]
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file or directory. It does not return the content of the directory or file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: DirectoryProperties or FileProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../tests/test_blob_samples_common.py
+                :start-after: [START get_blob_properties]
+                :end-before: [END get_blob_properties]
+                :language: python
+                :dedent: 8
+                :caption: Getting the properties for a file/directory.
+        """
+        path_properties = self._blob_client.get_blob_properties(**kwargs)
+        return path_properties
+
+    def set_metadata(self, metadata,  # type: Dict[str, str]
+                     **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file or directory. Each call to this operation replaces all existing metadata
+        attached to the file or directory. To remove all metadata from the path,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file or directory as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_metadata only succeeds if the
+            file/directory's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: file/directory-updated property dict (Etag and last modified).
+        """
+        return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
+
+    def set_http_headers(self, content_settings=None,  # type: Optional[ContentSettings]
+                         **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """Sets system properties on the file or directory.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set file/directory properties.
+        :keyword lease:
+            If specified, set_http_headers only succeeds if the
+            file/directory's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: file/directory-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
+        """
+        return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
+
+    def acquire_lease(self, lease_duration=-1,  # type: Optional[int]
+                      lease_id=None,  # type: Optional[str]
+                      **kwargs):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file or directory does not have an active lease,
+        the DataLake service creates a lease on the file/directory and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A DataLakeLeaseClient object, that can be run in a context manager.
+        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)  # type: ignore
+        lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_quick_query_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_quick_query_helper.py
new file mode 100644
index 0000000..ff67d27
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_quick_query_helper.py
@@ -0,0 +1,71 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Union, Iterable, IO  # pylint: disable=unused-import
+
+
+class DataLakeFileQueryReader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to read query results.
+
+    :ivar str name:
+        The name of the file being queried.
+    :ivar str file_system:
+        The name of the file system where the file is.
+    :ivar dict response_headers:
+        The response_headers of the quick query request.
+    :ivar bytes record_delimiter:
+        The delimiter used to separate lines, or records, within the data. The `records`
+        method will return these lines via a generator.
+    """
+
+    def __init__(
+            self,
+            blob_query_reader
+    ):
+        self.name = blob_query_reader.name
+        self.file_system = blob_query_reader.container
+        self.response_headers = blob_query_reader.response_headers
+        self.record_delimiter = blob_query_reader.record_delimiter
+        self._bytes_processed = 0
+        self._blob_query_reader = blob_query_reader
+
+    def __len__(self):
+        return len(self._blob_query_reader)
+
+    def readall(self):
+        # type: () -> Union[bytes, str]
+        """Return all query results.
+
+        This operation is blocking until all data is downloaded.
+        If encoding has been configured - this will be used to decode individual
+        records as they are received.
+
+        :rtype: Union[bytes, str]
+        """
+        return self._blob_query_reader.readall()
+
+    def readinto(self, stream):
+        # type: (IO) -> None
+        """Download the query result to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream.
+        :returns: None
+        """
+        self._blob_query_reader.readinto(stream)
+
+    def records(self):
+        # type: () -> Iterable[Union[bytes, str]]
+        """Returns a record generator for the query result.
+
+        Records will be returned line by line.
+        If encoding has been configured - this will be used to decode individual
+        records as they are received.
+
+        :rtype: Iterable[Union[bytes, str]]
+        """
+        return self._blob_query_reader.records()
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_serialize.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_serialize.py
new file mode 100644
index 0000000..34a7437
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_serialize.py
@@ -0,0 +1,89 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +from azure.multiapi.storagev2.blob.v2019_07_07._serialize import _get_match_headers # pylint: disable=protected-access +from ._shared import encode_base64 +from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \ + SourceModifiedAccessConditions, LeaseAccessConditions + + +def convert_dfs_url_to_blob_url(dfs_account_url): + return dfs_account_url.replace('.dfs.', '.blob.', 1) + + +def convert_datetime_to_rfc1123(date): + weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()] + month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", + "Oct", "Nov", "Dec"][date.month - 1] + return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (weekday, date.day, month, + date.year, date.hour, date.minute, date.second) + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> str + headers = list() + if metadata: + for key, value in metadata.items(): + headers.append(key + '=') + headers.append(encode_base64(value)) + headers.append(',') + + if headers: + del headers[-1] + + return ''.join(headers) + + +def get_mod_conditions(kwargs): + # type: (Dict[str, Any]) -> ModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') + return ModifiedAccessConditions( + if_modified_since=kwargs.pop('if_modified_since', None), + if_unmodified_since=kwargs.pop('if_unmodified_since', None), + if_match=if_match or kwargs.pop('if_match', None), + if_none_match=if_none_match or kwargs.pop('if_none_match', None) + ) + + +def get_source_mod_conditions(kwargs): + # type: (Dict[str, Any]) -> SourceModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') + return SourceModifiedAccessConditions( + source_if_modified_since=kwargs.pop('source_if_modified_since', None), + source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), + source_if_match=if_match or kwargs.pop('source_if_match', None), + source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) + ) + + +def get_path_http_headers(content_settings): + path_headers = PathHTTPHeaders( + cache_control=content_settings.cache_control, + content_type=content_settings.content_type, + content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + content_encoding=content_settings.content_encoding, + content_language=content_settings.content_language, + content_disposition=content_settings.content_disposition + ) + return path_headers + + +def get_access_conditions(lease): + # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] + try: + lease_id = lease.id # type: ignore + except AttributeError: + lease_id = lease # type: ignore + return LeaseAccessConditions(lease_id=lease_id) if lease_id else None + + +def get_lease_id(lease): + if not lease: + return "" + try: + lease_id = lease.id + except AttributeError: + lease_id = lease + return lease_id diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/__init__.py new file mode 100644 index 0000000..160f882 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/__init__.py @@ -0,0 +1,56 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import hmac
+
+try:
+ from urllib.parse import quote, unquote
+except ImportError:
+ from urllib2 import quote, unquote # type: ignore
+
+import six
+
+
+def url_quote(url):
+ return quote(url)
+
+
+def url_unquote(url):
+ return unquote(url)
+
+
+def encode_base64(data):
+ if isinstance(data, six.text_type):
+ data = data.encode('utf-8')
+ encoded = base64.b64encode(data)
+ return encoded.decode('utf-8')
+
+
+def decode_base64_to_bytes(data):
+ if isinstance(data, six.text_type):
+ data = data.encode('utf-8')
+ return base64.b64decode(data)
+
+
+def decode_base64_to_text(data):
+ decoded_bytes = decode_base64_to_bytes(data)
+ return decoded_bytes.decode('utf-8')
+
+
+def sign_string(key, string_to_sign, key_is_base64=True):
+ if key_is_base64:
+ key = decode_base64_to_bytes(key)
+ else:
+ if isinstance(key, six.text_type):
+ key = key.encode('utf-8')
+ if isinstance(string_to_sign, six.text_type):
+ string_to_sign = string_to_sign.encode('utf-8')
+ signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
+ digest = signed_hmac_sha256.digest()
+ encoded_digest = encode_base64(digest)
+ return encoded_digest
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/authentication.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/authentication.py
new file mode 100644
index 0000000..d04c1e4
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/authentication.py
@@ -0,0 +1,142 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import sys
+
+try:
+ from urllib.parse import urlparse, unquote
+except ImportError:
+ from urlparse import urlparse # type: ignore
+ from urllib2 import unquote # type: ignore
+
+try:
+ from yarl import URL
+except ImportError:
+ pass
+
+try:
+ from azure.core.pipeline.transport import AioHttpTransport
+except ImportError:
+ AioHttpTransport = None
+
+from azure.core.exceptions import ClientAuthenticationError
+from azure.core.pipeline.policies import SansIOHTTPPolicy
+
+from . import sign_string
+
+
+logger = logging.getLogger(__name__)
+
+
+# wraps a given exception with the desired exception type
+def _wrap_exception(ex, desired_type):
+ msg = ""
+ if ex.args:
+ msg = ex.args[0]
+ if sys.version_info >= (3,):
+ # Automatic chaining in Python 3 means we keep the trace
+ return desired_type(msg)
+ # There is no general way in Python 2 to preserve the stack trace
+ # that will not itself result in an error under Python 3.
+ # However, we can keep the previous error type and message
+ # TODO: In the future we will log the trace
+ return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
+
+
+class AzureSigningError(ClientAuthenticationError):
+ """
+ Represents a fatal error when attempting to sign a request.
+ In general, the cause of this exception is user error. For example, the given account key is not valid.
+ Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
+ """ + + +# pylint: disable=no-self-use +class SharedKeyCredentialPolicy(SansIOHTTPPolicy): + + def __init__(self, account_name, account_key): + self.account_name = account_name + self.account_key = account_key + super(SharedKeyCredentialPolicy, self).__init__() + + @staticmethod + def _get_headers(request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + @staticmethod + def _get_verb(request): + return request.http_request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = urlparse(request.http_request.url).path + try: + if isinstance(request.context.transport, AioHttpTransport) or \ + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ + isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), + AioHttpTransport): + uri_path = URL(uri_path) + return '/' + self.account_name + str(uri_path) + except TypeError: + pass + return '/' + self.account_name + uri_path + + @staticmethod + def _get_canonicalized_headers(request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.http_request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers.sort() + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + unquote(value) + + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + try: + signature = sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.http_request.headers['Authorization'] = auth_string + except Exception as ex: + # Wrap any error that occurred as signing error + # Doing so will clarify/locate the source of problem + raise _wrap_exception(ex, AzureSigningError) + + def on_request(self, request): + string_to_sign = \ + self._get_verb(request) + \ + self._get_headers( + request, + [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client.py new file mode 100644 index 0000000..14deea6 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client.py @@ -0,0 +1,437 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, + Optional, + Any, + Iterable, + Dict, + List, + Type, + Tuple, + TYPE_CHECKING, +) +import logging + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +import six + +from azure.core.configuration import Configuration +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + RedirectPolicy, + ContentDecodePolicy, + BearerTokenCredentialPolicy, + ProxyPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + UserAgentPolicy +) + +from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .policies import ( + StorageHeadersPolicy, + StorageContentValidation, + StorageRequestHook, + StorageResponseHook, + StorageLoggingPolicy, + StorageHosts, + QueueMessagePolicy, + ExponentialRetry, +) +from .._version import VERSION +from .._generated.models import StorageErrorException +from .response_handlers import process_storage_error, PartialBatchErrorException + + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, + "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, + "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, + "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, +} + + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None
+ self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+ self._hosts = kwargs.get("_hosts")
+ self.scheme = parsed_url.scheme
+
+ if service not in ["blob", "queue", "file-share", "dfs"]:
+ raise ValueError("Invalid service: {}".format(service))
+ service_name = service.split('-')[0]
+ account = parsed_url.netloc.split(".{}.core.".format(service_name))
+
+ self.account_name = account[0] if len(account) > 1 else None
+ if not self.account_name and (parsed_url.netloc.startswith("localhost")
+ or parsed_url.netloc.startswith("127.0.0.1")):
+ self.account_name = parsed_url.path.strip("/")
+
+ self.credential = _format_shared_key_credential(self.account_name, credential)
+ if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+ raise ValueError("Token credential is only supported with HTTPS.")
+
+ secondary_hostname = None
+ if hasattr(self.credential, "account_name"):
+ self.account_name = self.credential.account_name
+ secondary_hostname = "{}-secondary.{}.{}".format(
+ self.credential.account_name, service_name, SERVICE_HOST_BASE)
+
+ if not self._hosts:
+ if len(account) > 1:
+ secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+ if kwargs.get("secondary_hostname"):
+ secondary_hostname = kwargs["secondary_hostname"]
+ primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+ self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+ self.require_encryption = kwargs.get("require_encryption", False)
+ self.key_encryption_key = kwargs.get("key_encryption_key")
+ self.key_resolver_function = kwargs.get("key_resolver_function")
+ self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
+
+ def __enter__(self):
+ self._client.__enter__()
+ return self
+
+ def __exit__(self, *args):
+ self._client.__exit__(*args)
+
+ def close(self):
+ """ This method closes the sockets opened by the client.
+ It need not be called when the client is used as a context manager.
+ """
+ self._client.close()
+
+ @property
+ def url(self):
+ """The full endpoint URL to this entity, including SAS token if used.
+
+ This could be either the primary endpoint,
+ or the secondary endpoint depending on the current :func:`location_mode`.
+ """
+ return self._format_url(self._hosts[self._location_mode])
+
+ @property
+ def primary_endpoint(self):
+ """The full primary endpoint URL.
+
+ :type: str
+ """
+ return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+ @property
+ def primary_hostname(self):
+ """The hostname of the primary endpoint.
+
+ :type: str
+ """
+ return self._hosts[LocationMode.PRIMARY]
+
+ @property
+ def secondary_endpoint(self):
+ """The full secondary endpoint URL if configured.
+
+ If not available, a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+ `secondary_hostname` keyword argument on instantiation.
+
+ :type: str
+ :raise ValueError:
+ """
+ if not self._hosts[LocationMode.SECONDARY]:
+ raise ValueError("No secondary host configured.")
+ return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+ @property
+ def secondary_hostname(self):
+ """The hostname of the secondary endpoint.
+
+ If not available, this will be None. To explicitly specify a secondary hostname, use the optional
+ `secondary_hostname` keyword argument on instantiation.
+
+ :type: str or None
+ """
+ return self._hosts[LocationMode.SECONDARY]
+
+ @property
+ def location_mode(self):
+ """The location mode that the client is currently using.
+
+ By default this will be "primary". Options include "primary" and "secondary".
+
+ :type: str
+ """
+
+ return self._location_mode
+
+ @location_mode.setter
+ def location_mode(self, value):
+ if self._hosts.get(value):
+ self._location_mode = value
+ self._client._config.url = self.url # pylint: disable=protected-access
+ else:
+ raise ValueError("No host URL for location mode: {}".format(value))
+
+ @property
+ def api_version(self):
+ """The version of the Storage API used for requests.
+
+ :type: str
+ """
+ return self._client._config.version # pylint: disable=protected-access
+
+ def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
+ query_str = "?"
+ if snapshot:
+ query_str += "snapshot={}&".format(snapshot)
+ if share_snapshot:
+ query_str += "sharesnapshot={}&".format(share_snapshot)
+ if sas_token and not credential:
+ query_str += sas_token
+ elif is_credential_sastoken(credential):
+ query_str += credential.lstrip("?")
+ credential = None
+ return query_str.rstrip("?&"), credential
+
+ def _create_pipeline(self, credential, **kwargs):
+ # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+ self._credential_policy = None
+ if hasattr(credential, "get_token"):
+ self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+ elif isinstance(credential, SharedKeyCredentialPolicy):
+ self._credential_policy = credential
+ elif credential is not None:
+ raise TypeError("Unsupported credential: {}".format(credential))
+
+ config = kwargs.get("_configuration") or create_configuration(**kwargs)
+ if kwargs.get("_pipeline"):
+ return config, kwargs["_pipeline"]
+ config.transport = kwargs.get("transport") # type: ignore
+ kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+ kwargs.setdefault("read_timeout", READ_TIMEOUT)
+ if not config.transport:
+ config.transport = RequestsTransport(**kwargs)
+ policies = [
+ QueueMessagePolicy(),
+ config.headers_policy,
+ config.proxy_policy,
+ config.user_agent_policy,
+ StorageContentValidation(),
+ StorageRequestHook(**kwargs),
+ self._credential_policy,
+ ContentDecodePolicy(response_encoding="utf-8"),
+ RedirectPolicy(**kwargs),
+ StorageHosts(hosts=self._hosts, **kwargs),
+ config.retry_policy,
+ config.logging_policy,
+ StorageResponseHook(**kwargs),
+ DistributedTracingPolicy(**kwargs),
+ HttpLoggingPolicy(**kwargs)
+ ]
+ if kwargs.get("_additional_pipeline_policies"):
+ policies = policies + kwargs.get("_additional_pipeline_policies")
+ return config, Pipeline(config.transport, policies=policies)
+
+ def _batch_send(
+ self, *reqs, # type: HttpRequest
+ **kwargs
+ ):
+ """Given a series of requests, do a Storage batch call.
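+
+ A usage sketch (illustrative only; ``client`` and the prepared
+ sub-requests are assumptions, not part of this change):
+
+ .. code-block:: python
+
+ parts = client._batch_send(req1, req2, raise_on_any_failure=False)
+ for part in parts:
+ print(part.status_code)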
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + request = self._client._client.post( # pylint: disable=protected-access + url='https://{}/?comp=batch'.format(self.primary_hostname), + headers={ + 'x-ms-version': self.api_version + } + ) + + request.set_multipart_mixed( + *reqs, + policies=[ + StorageHeadersPolicy(), + self._credential_policy + ], + enforce_https=False + ) + + pipeline_response = self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() + if raise_on_any_failure: + parts = list(response.parts()) + if any(p for p in parts if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts + ) + raise error + return iter(parts) + return parts + except StorageErrorException as error: + process_storage_error(error) + +class TransportWrapper(HttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. + """ + def __init__(self, transport): + self._transport = transport + + def send(self, request, **kwargs): + return self._transport.send(request, **kwargs) + + def open(self): + pass + + def close(self): + pass + + def __enter__(self): + pass + + def __exit__(self, *args): # pylint: disable=arguments-differ + pass + + +def _format_shared_key_credential(account_name, credential): + if isinstance(credential, six.string_types): + if not account_name: + raise ValueError("Unable to determine account name for shared key credential.") + credential = {"account_name": account_name, "account_key": credential} + if isinstance(credential, dict): + if "account_name" not in credential: + raise ValueError("Shared key credential missing 'account_name") + if "account_key" not in credential: + raise ValueError("Shared key credential missing 'account_key") + return SharedKeyCredentialPolicy(**credential) + return credential + + +def parse_connection_str(conn_str, credential, service): + conn_str = conn_str.rstrip(";") + conn_settings = [s.split("=", 1) for s in conn_str.split(";")] + if any(len(tup) != 2 for tup in conn_settings): + raise ValueError("Connection string is either blank or malformed.") + conn_settings = dict(conn_settings) + endpoints = _SERVICE_PARAMS[service] + primary = None + secondary = None + if not credential: + try: + credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} + except KeyError: + credential = conn_settings.get("SharedAccessSignature") + if endpoints["primary"] in conn_settings: + primary = conn_settings[endpoints["primary"]] + if endpoints["secondary"] in conn_settings: + secondary = conn_settings[endpoints["secondary"]] + else: + if endpoints["secondary"] in conn_settings: + raise ValueError("Connection string specifies only secondary endpoint.") + try: + primary = "{}://{}.{}.{}".format( + conn_settings["DefaultEndpointsProtocol"], + conn_settings["AccountName"], + service, + conn_settings["EndpointSuffix"], + ) + secondary = "{}-secondary.{}.{}".format( + conn_settings["AccountName"], service, conn_settings["EndpointSuffix"] + ) + except KeyError: + pass + + if not primary: + try: + primary = "https://{}.{}.{}".format( + 
conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) + ) + except KeyError: + raise ValueError("Connection string missing required connection details.") + return primary, secondary, credential + + +def create_configuration(**kwargs): + # type: (**Any) -> Configuration + config = Configuration(**kwargs) + config.headers_policy = StorageHeadersPolicy(**kwargs) + config.user_agent_policy = UserAgentPolicy( + sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) + config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.logging_policy = StorageLoggingPolicy(**kwargs) + config.proxy_policy = ProxyPolicy(**kwargs) + + # Storage settings + config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) + config.copy_polling_interval = 15 + + # Block blob uploads + config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) + config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) + config.use_byte_buffer = kwargs.get("use_byte_buffer", False) + + # Page blob uploads + config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, six.string_types): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client_async.py new file mode 100644 index 0000000..d252ad0 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client_async.py @@ -0,0 +1,179 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+ TYPE_CHECKING
+)
+import logging
+from azure.core.pipeline import AsyncPipeline
+from azure.core.async_paging import AsyncList
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline.policies import (
+ ContentDecodePolicy,
+ AsyncBearerTokenCredentialPolicy,
+ AsyncRedirectPolicy,
+ DistributedTracingPolicy,
+ HttpLoggingPolicy,
+)
+from azure.core.pipeline.transport import AsyncHttpTransport
+
+from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT
+from .authentication import SharedKeyCredentialPolicy
+from .base_client import create_configuration
+from .policies import (
+ StorageContentValidation,
+ StorageRequestHook,
+ StorageHosts,
+ StorageHeadersPolicy,
+ QueueMessagePolicy
+)
+from .policies_async import AsyncStorageResponseHook
+
+from .._generated.models import StorageErrorException
+from .response_handlers import process_storage_error, PartialBatchErrorException
+
+if TYPE_CHECKING:
+ from azure.core.pipeline import Pipeline
+ from azure.core.pipeline.transport import HttpRequest
+ from azure.core.configuration import Configuration
+_LOGGER = logging.getLogger(__name__)
+
+
+class AsyncStorageAccountHostsMixin(object):
+
+ def __enter__(self):
+ raise TypeError("Async client only supports 'async with'.")
+
+ def __exit__(self, *args):
+ pass
+
+ async def __aenter__(self):
+ await self._client.__aenter__()
+ return self
+
+ async def __aexit__(self, *args):
+ await self._client.__aexit__(*args)
+
+ async def close(self):
+ """ This method closes the sockets opened by the client.
+ It need not be called when the client is used as a context manager.
+ """
+ await self._client.close()
+
+ def _create_pipeline(self, credential, **kwargs):
+ # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+ self._credential_policy = None
+ if hasattr(credential, 'get_token'):
+ self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+ elif isinstance(credential, SharedKeyCredentialPolicy):
+ self._credential_policy = credential
+ elif credential is not None:
+ raise TypeError("Unsupported credential: {}".format(credential))
+ config = kwargs.get('_configuration') or create_configuration(**kwargs)
+ if kwargs.get('_pipeline'):
+ return config, kwargs['_pipeline']
+ config.transport = kwargs.get('transport') # type: ignore
+ kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+ kwargs.setdefault("read_timeout", READ_TIMEOUT)
+ if not config.transport:
+ try:
+ from azure.core.pipeline.transport import AioHttpTransport
+ except ImportError:
+ raise ImportError("Unable to create async transport.
Please check aiohttp is installed.")
+ config.transport = AioHttpTransport(**kwargs)
+ policies = [
+ QueueMessagePolicy(),
+ config.headers_policy,
+ config.proxy_policy,
+ config.user_agent_policy,
+ StorageContentValidation(),
+ StorageRequestHook(**kwargs),
+ self._credential_policy,
+ ContentDecodePolicy(response_encoding="utf-8"),
+ AsyncRedirectPolicy(**kwargs),
+ StorageHosts(hosts=self._hosts, **kwargs), # type: ignore
+ config.retry_policy,
+ config.logging_policy,
+ AsyncStorageResponseHook(**kwargs),
+ DistributedTracingPolicy(**kwargs),
+ HttpLoggingPolicy(**kwargs),
+ ]
+ if kwargs.get("_additional_pipeline_policies"):
+ policies = policies + kwargs.get("_additional_pipeline_policies")
+ return config, AsyncPipeline(config.transport, policies=policies)
+
+ async def _batch_send(
+ self, *reqs: 'HttpRequest',
+ **kwargs
+ ):
+ """Given a series of requests, do a Storage batch call.
+ """
+ # Pop it here, so requests doesn't feel bad about additional kwarg
+ raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+ request = self._client._client.post( # pylint: disable=protected-access
+ url='https://{}/?comp=batch'.format(self.primary_hostname),
+ headers={
+ 'x-ms-version': self.api_version
+ }
+ )
+
+ request.set_multipart_mixed(
+ *reqs,
+ policies=[
+ StorageHeadersPolicy(),
+ self._credential_policy
+ ],
+ enforce_https=False
+ )
+
+ pipeline_response = await self._pipeline.run(
+ request, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ try:
+ if response.status_code not in [202]:
+ raise HttpResponseError(response=response)
+ parts = response.parts() # Return an AsyncIterator
+ if raise_on_any_failure:
+ parts_list = []
+ async for part in parts:
+ parts_list.append(part)
+ if any(p for p in parts_list if not 200 <= p.status_code < 300):
+ error = PartialBatchErrorException(
+ message="There is a partial failure in the batch operation.",
+ response=response, parts=parts_list
+ )
+ raise error
+ return AsyncList(parts_list)
+ return parts
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+ """Wrapper class that ensures that an inner client created
+ by a `get_client` method does not close the outer transport for the parent
+ when used in a context manager.
+ """
+ def __init__(self, async_transport):
+ self._transport = async_transport
+
+ async def send(self, request, **kwargs):
+ return await self._transport.send(request, **kwargs)
+
+ async def open(self):
+ pass
+
+ async def close(self):
+ pass
+
+ async def __aenter__(self):
+ pass
+
+ async def __aexit__(self, *args): # pylint: disable=arguments-differ
+ pass
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/constants.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/constants.py
new file mode 100644
index 0000000..7fb05b5
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/constants.py
@@ -0,0 +1,26 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- + +import sys +from .._generated.version import VERSION + + +X_MS_VERSION = VERSION + +# Socket timeout in seconds +CONNECTION_TIMEOUT = 20 +READ_TIMEOUT = 20 + +# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) +# The socket timeout is now the maximum total duration to send all data. +if sys.version_info >= (3, 5): + # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds + # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) + READ_TIMEOUT = 2000 + +STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" + +SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/encryption.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/encryption.py new file mode 100644 index 0000000..62607cc --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/encryption.py @@ -0,0 +1,542 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os +from os import urandom +from json import ( + dumps, + loads, +) +from collections import OrderedDict + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import CBC +from cryptography.hazmat.primitives.padding import PKCS7 + +from azure.core.exceptions import HttpResponseError + +from .._version import VERSION +from . import encode_base64, decode_base64_to_bytes + + +_ENCRYPTION_PROTOCOL_V1 = '1.0' +_ERROR_OBJECT_INVALID = \ + '{0} does not define a complete interface. Value of {1} is either missing or invalid.' + + +def _validate_not_none(param_name, param): + if param is None: + raise ValueError('{0} should not be None.'.format(param_name)) + + +def _validate_key_encryption_key_wrap(kek): + # Note that None is not callable and so will fail the second clause of each check. + if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) + + +class _EncryptionAlgorithm(object): + ''' + Specifies which client encryption algorithm is used. + ''' + AES_CBC_256 = 'AES_CBC_256' + + +class _WrappedContentKey: + ''' + Represents the envelope key details stored on the service. + ''' + + def __init__(self, algorithm, encrypted_key, key_id): + ''' + :param str algorithm: + The algorithm used for wrapping. + :param bytes encrypted_key: + The encrypted content-encryption-key. + :param str key_id: + The key-encryption-key identifier string. 
+ ''' + + _validate_not_none('algorithm', algorithm) + _validate_not_none('encrypted_key', encrypted_key) + _validate_not_none('key_id', key_id) + + self.algorithm = algorithm + self.encrypted_key = encrypted_key + self.key_id = key_id + + +class _EncryptionAgent: + ''' + Represents the encryption agent stored on the service. + It consists of the encryption protocol version and encryption algorithm used. + ''' + + def __init__(self, encryption_algorithm, protocol): + ''' + :param _EncryptionAlgorithm encryption_algorithm: + The algorithm used for encrypting the message contents. + :param str protocol: + The protocol version used for encryption. + ''' + + _validate_not_none('encryption_algorithm', encryption_algorithm) + _validate_not_none('protocol', protocol) + + self.encryption_algorithm = str(encryption_algorithm) + self.protocol = protocol + + +class _EncryptionData: + ''' + Represents the encryption data that is stored on the service. + ''' + + def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, + key_wrapping_metadata): + ''' + :param bytes content_encryption_IV: + The content encryption initialization vector. + :param _EncryptionAgent encryption_agent: + The encryption agent. + :param _WrappedContentKey wrapped_content_key: + An object that stores the wrapping algorithm, the key identifier, + and the encrypted key bytes. + :param dict key_wrapping_metadata: + A dict containing metadata related to the key wrapping. + ''' + + _validate_not_none('content_encryption_IV', content_encryption_IV) + _validate_not_none('encryption_agent', encryption_agent) + _validate_not_none('wrapped_content_key', wrapped_content_key) + + self.content_encryption_IV = content_encryption_IV + self.encryption_agent = encryption_agent + self.wrapped_content_key = wrapped_content_key + self.key_wrapping_metadata = key_wrapping_metadata + + +def _generate_encryption_data_dict(kek, cek, iv): + ''' + Generates and returns the encryption metadata as a dict. + + :param object kek: The key encryption key. See calling functions for more information. + :param bytes cek: The content encryption key. + :param bytes iv: The initialization vector. + :return: A dict containing all the encryption metadata. + :rtype: dict + ''' + # Encrypt the cek. + wrapped_cek = kek.wrap_key(cek) + + # Build the encryption_data dict. + # Use OrderedDict to comply with Java's ordering requirement. + wrapped_content_key = OrderedDict() + wrapped_content_key['KeyId'] = kek.get_kid() + wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) + wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() + + encryption_agent = OrderedDict() + encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 + encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 + + encryption_data_dict = OrderedDict() + encryption_data_dict['WrappedContentKey'] = wrapped_content_key + encryption_data_dict['EncryptionAgent'] = encryption_agent + encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) + encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} + + return encryption_data_dict + + +def _dict_to_encryption_data(encryption_data_dict): + ''' + Converts the specified dictionary to an EncryptionData object for + eventual use in decryption. + + :param dict encryption_data_dict: + The dictionary containing the encryption data. + :return: an _EncryptionData object built from the dictionary. 
+ :rtype: _EncryptionData + ''' + try: + if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: + raise ValueError("Unsupported encryption version.") + except KeyError: + raise ValueError("Unsupported encryption version.") + wrapped_content_key = encryption_data_dict['WrappedContentKey'] + wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], + decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), + wrapped_content_key['KeyId']) + + encryption_agent = encryption_data_dict['EncryptionAgent'] + encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], + encryption_agent['Protocol']) + + if 'KeyWrappingMetadata' in encryption_data_dict: + key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] + else: + key_wrapping_metadata = None + + encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), + encryption_agent, + wrapped_content_key, + key_wrapping_metadata) + + return encryption_data + + +def _generate_AES_CBC_cipher(cek, iv): + ''' + Generates and returns an encryption cipher for AES CBC using the given cek and iv. + + :param bytes[] cek: The content encryption key for the cipher. + :param bytes[] iv: The initialization vector for the cipher. + :return: A cipher for encrypting in AES256 CBC. + :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher + ''' + + backend = default_backend() + algorithm = AES(cek) + mode = CBC(iv) + return Cipher(algorithm, mode, backend) + + +def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): + ''' + Extracts and returns the content_encryption_key stored in the encryption_data object + and performs necessary validation on all parameters. + :param _EncryptionData encryption_data: + The encryption metadata of the retrieved value. + :param obj key_encryption_key: + The key_encryption_key used to unwrap the cek. Please refer to high-level service object + instance variables for more details. + :param func key_resolver: + A function used that, given a key_id, will return a key_encryption_key. Please refer + to high-level service object instance variables for more details. + :return: the content_encryption_key stored in the encryption_data object. + :rtype: bytes[] + ''' + + _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) + _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) + + if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: + raise ValueError('Encryption version is not supported.') + + content_encryption_key = None + + # If the resolver exists, give priority to the key it finds. + if key_resolver is not None: + key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) + + _validate_not_none('key_encryption_key', key_encryption_key) + if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) + if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): + raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') + # Will throw an exception if the specified algorithm is not supported. 
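+ # Illustrative sketch (an assumption, not part of the original change): the
+ # algorithm string handed to unwrap_key() below is whatever
+ # get_key_wrap_algorithm() reported at encryption time, so a minimal
+ # key-encryption-key could look like:
+ # class KeyWrapper:
+ # def get_kid(self): return 'local:key1'
+ # def get_key_wrap_algorithm(self): return 'AES_256_KEY_WRAP' # hypothetical label
+ # def wrap_key(self, key): ... # inverse of unwrap_key
+ # def unwrap_key(self, key, algorithm): ... # must accept the recorded algorithm string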
+ content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+ encryption_data.wrapped_content_key.algorithm)
+ _validate_not_none('content_encryption_key', content_encryption_key)
+
+ return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+ '''
+ Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+ Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+ Returns the original plaintext.
+
+ :param str message:
+ The ciphertext to be decrypted.
+ :param _EncryptionData encryption_data:
+ The metadata associated with this ciphertext.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ unwrap_key(key, algorithm)
+ - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+ get_kid()
+ - returns a string key id for this key-encryption-key.
+ :param function resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The decrypted plaintext.
+ :rtype: str
+ '''
+ _validate_not_none('message', message)
+ content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+ if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+ raise ValueError('Specified encryption algorithm is not supported.')
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+ # decrypt data
+ decrypted_data = message
+ decryptor = cipher.decryptor()
+ decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+ # unpad data
+ unpadder = PKCS7(128).unpadder()
+ decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+ return decrypted_data
+
+
+def encrypt_blob(blob, key_encryption_key):
+ '''
+ Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+ Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+ Returns a json-formatted string containing the encryption metadata. This method should
+ only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+ is done as a part of the upload_data_chunks method.
+
+ :param bytes blob:
+ The blob to be encrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+ :rtype: (str, bytes)
+ '''
+
+ _validate_not_none('blob', blob)
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ _validate_key_encryption_key_wrap(key_encryption_key)
+
+ # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+ content_encryption_key = urandom(32)
+ initialization_vector = urandom(16)
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+ # PKCS7 with 16 byte blocks ensures compatibility with AES.
+ padder = PKCS7(128).padder()
+ padded_data = padder.update(blob) + padder.finalize()
+
+ # Encrypt the data.
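+ # Note: with AES-CBC and PKCS7(128), the ciphertext length equals the padded
+ # plaintext length (always a multiple of 16 bytes). The IV is not prepended
+ # to the ciphertext; it travels in the encryption metadata as ContentEncryptionIV.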
+ encryptor = cipher.encryptor()
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+ encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+ initialization_vector)
+ encryption_data['EncryptionMode'] = 'FullBlob'
+
+ return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(key_encryption_key):
+ '''
+ Generates the encryption_metadata for the blob.
+
+ :param object key_encryption_key:
+ The key-encryption-key used to wrap the cek associated with this blob.
+ :return: A tuple containing the cek and iv for this blob as well as the
+ serialized encryption metadata for the blob.
+ :rtype: (bytes, bytes, str)
+ '''
+ encryption_data = None
+ content_encryption_key = None
+ initialization_vector = None
+ if key_encryption_key:
+ _validate_key_encryption_key_wrap(key_encryption_key)
+ content_encryption_key = urandom(32)
+ initialization_vector = urandom(16)
+ encryption_data = _generate_encryption_data_dict(key_encryption_key,
+ content_encryption_key,
+ initialization_vector)
+ encryption_data['EncryptionMode'] = 'FullBlob'
+ encryption_data = dumps(encryption_data)
+
+ return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+ content, start_offset, end_offset, response_headers):
+ '''
+ Decrypts the given blob contents and returns only the requested range.
+
+ :param bool require_encryption:
+ Whether or not the calling blob service requires objects to be decrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key
+ using the string-specified algorithm.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :param key_resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The decrypted blob content.
+ :rtype: bytes
+ '''
+ try:
+ encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+ except: # pylint: disable=bare-except
+ if require_encryption:
+ raise ValueError(
+ 'Encryption required, but received data does not contain appropriate metadata.'
+ \
+ 'Data was either not encrypted or metadata has been lost.')
+
+ return content
+
+ if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+ raise ValueError('Specified encryption algorithm is not supported.')
+
+ blob_type = response_headers['x-ms-blob-type']
+
+ iv = None
+ unpad = False
+ if 'content-range' in response_headers:
+ content_range = response_headers['content-range']
+ # Format: 'bytes x-y/size'
+
+ # Ignore the word 'bytes'
+ content_range = content_range.split(' ')
+
+ content_range = content_range[1].split('-')
+ content_range = content_range[1].split('/')
+ end_range = int(content_range[0])
+ blob_size = int(content_range[1])
+
+ if start_offset >= 16:
+ iv = content[:16]
+ content = content[16:]
+ start_offset -= 16
+ else:
+ iv = encryption_data.content_encryption_IV
+
+ if end_range == blob_size - 1:
+ unpad = True
+ else:
+ unpad = True
+ iv = encryption_data.content_encryption_IV
+
+ if blob_type == 'PageBlob':
+ unpad = False
+
+ content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+ decryptor = cipher.decryptor()
+
+ content = decryptor.update(content) + decryptor.finalize()
+ if unpad:
+ unpadder = PKCS7(128).unpadder()
+ content = unpadder.update(content) + unpadder.finalize()
+
+ return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+ encryptor = None
+ padder = None
+
+ if cek is not None and iv is not None:
+ cipher = _generate_AES_CBC_cipher(cek, iv)
+ encryptor = cipher.encryptor()
+ padder = PKCS7(128).padder() if should_pad else None
+
+ return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key):
+ '''
+ Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
+ Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+ Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+ :param object message:
+ The plain text message to be encrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :return: A json-formatted string containing the encrypted message and the encryption metadata.
+ :rtype: str
+ '''
+
+ _validate_not_none('message', message)
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ _validate_key_encryption_key_wrap(key_encryption_key)
+
+ # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+ content_encryption_key = os.urandom(32)
+ initialization_vector = os.urandom(16)
+
+ # Queue encoding functions all return unicode strings, and encryption should
+ # operate on binary strings.
+ message = message.encode('utf-8')
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+ # PKCS7 with 16 byte blocks ensures compatibility with AES.
+ padder = PKCS7(128).padder()
+ padded_data = padder.update(message) + padder.finalize()
+
+ # Encrypt the data.
+ encryptor = cipher.encryptor()
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+ # Build the dictionary structure.
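+ # Shape of the serialized payload built below (a sketch; values are
+ # placeholders, not literal output):
+ # {
+ # "EncryptedMessageContents": "<base64 ciphertext>",
+ # "EncryptionData": {
+ # "WrappedContentKey": {"KeyId": ..., "EncryptedKey": ..., "Algorithm": ...},
+ # "EncryptionAgent": {"Protocol": "1.0", "EncryptionAlgorithm": "AES_CBC_256"},
+ # "ContentEncryptionIV": "<base64 iv>",
+ # "KeyWrappingMetadata": {"EncryptionLibrary": "Python <version>"}
+ # }
+ # }
+ # decrypt_queue_message() parses this same structure back out.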
+ queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+ 'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+ content_encryption_key,
+ initialization_vector)}
+
+ return dumps(queue_message)
+
+
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+ '''
+ Returns the decrypted message contents from an EncryptedQueueMessage.
+ If no encryption metadata is present, will return the unaltered message.
+ :param str message:
+ The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+ :param bool require_encryption:
+ If set, will enforce that the retrieved messages are encrypted and decrypt them.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ unwrap_key(key, algorithm)
+ - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+ get_kid()
+ - returns a string key id for this key-encryption-key.
+ :param function resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The plain text message from the queue message.
+ :rtype: str
+ '''
+
+ try:
+ message = loads(message)
+
+ encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+ decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+ except (KeyError, ValueError):
+ # Message was not json formatted and so was not encrypted
+ # or the user provided a json formatted message.
+ if require_encryption:
+ raise ValueError('Message was not encrypted.')
+
+ return message
+ try:
+ return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+ except Exception as error:
+ raise HttpResponseError(
+ message="Decryption failed.",
+ response=response,
+ error=error)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/models.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/models.py
new file mode 100644
index 0000000..0aeb96a
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/models.py
@@ -0,0 +1,468 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-instance-attributes + +from enum import Enum + + +def get_enum_value(value): + if value is None or value in ["None", ""]: + return None + try: + return value.value + except AttributeError: + return value + + +class StorageErrorCode(str, Enum): + + # Generic storage values + account_already_exists = "AccountAlreadyExists" + account_being_created = "AccountBeingCreated" + account_is_disabled = "AccountIsDisabled" + authentication_failed = "AuthenticationFailed" + authorization_failure = "AuthorizationFailure" + no_authentication_information = "NoAuthenticationInformation" + condition_headers_not_supported = "ConditionHeadersNotSupported" + condition_not_met = "ConditionNotMet" + empty_metadata_key = "EmptyMetadataKey" + insufficient_account_permissions = "InsufficientAccountPermissions" + internal_error = "InternalError" + invalid_authentication_info = "InvalidAuthenticationInfo" + invalid_header_value = "InvalidHeaderValue" + invalid_http_verb = "InvalidHttpVerb" + invalid_input = "InvalidInput" + invalid_md5 = "InvalidMd5" + invalid_metadata = "InvalidMetadata" + invalid_query_parameter_value = "InvalidQueryParameterValue" + invalid_range = "InvalidRange" + invalid_resource_name = "InvalidResourceName" + invalid_uri = "InvalidUri" + invalid_xml_document = "InvalidXmlDocument" + invalid_xml_node_value = "InvalidXmlNodeValue" + md5_mismatch = "Md5Mismatch" + metadata_too_large = "MetadataTooLarge" + missing_content_length_header = "MissingContentLengthHeader" + missing_required_query_parameter = "MissingRequiredQueryParameter" + missing_required_header = "MissingRequiredHeader" + missing_required_xml_node = "MissingRequiredXmlNode" + multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" + operation_timed_out = "OperationTimedOut" + out_of_range_input = "OutOfRangeInput" + out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" + request_body_too_large = "RequestBodyTooLarge" + resource_type_mismatch = "ResourceTypeMismatch" + request_url_failed_to_parse = "RequestUrlFailedToParse" + resource_already_exists = "ResourceAlreadyExists" + resource_not_found = "ResourceNotFound" + server_busy = "ServerBusy" + unsupported_header = "UnsupportedHeader" + unsupported_xml_node = "UnsupportedXmlNode" + unsupported_query_parameter = "UnsupportedQueryParameter" + unsupported_http_verb = "UnsupportedHttpVerb" + + # Blob values + append_position_condition_not_met = "AppendPositionConditionNotMet" + blob_already_exists = "BlobAlreadyExists" + blob_not_found = "BlobNotFound" + blob_overwritten = "BlobOverwritten" + blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" + block_count_exceeds_limit = "BlockCountExceedsLimit" + block_list_too_long = "BlockListTooLong" + cannot_change_to_lower_tier = "CannotChangeToLowerTier" + cannot_verify_copy_source = "CannotVerifyCopySource" + container_already_exists = "ContainerAlreadyExists" + container_being_deleted = "ContainerBeingDeleted" + container_disabled = "ContainerDisabled" + container_not_found = "ContainerNotFound" + content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" + copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" + copy_id_mismatch = "CopyIdMismatch" + feature_version_mismatch = "FeatureVersionMismatch" + incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" + incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" + incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" + infinite_lease_duration_required = "InfiniteLeaseDurationRequired" + invalid_blob_or_block = "InvalidBlobOrBlock" + invalid_blob_tier = "InvalidBlobTier" + invalid_blob_type = "InvalidBlobType" + invalid_block_id = "InvalidBlockId" + invalid_block_list = "InvalidBlockList" + invalid_operation = "InvalidOperation" + invalid_page_range = "InvalidPageRange" + invalid_source_blob_type = "InvalidSourceBlobType" + invalid_source_blob_url = "InvalidSourceBlobUrl" + invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" + lease_already_present = "LeaseAlreadyPresent" + lease_already_broken = "LeaseAlreadyBroken" + lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" + lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" + lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" + lease_id_missing = "LeaseIdMissing" + lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" + lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" + lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" + lease_lost = "LeaseLost" + lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" + lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" + lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" + max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" + no_pending_copy_operation = "NoPendingCopyOperation" + operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" + pending_copy_operation = "PendingCopyOperation" + previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" + previous_snapshot_not_found = "PreviousSnapshotNotFound" + previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" + sequence_number_condition_not_met = "SequenceNumberConditionNotMet" + sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" + snapshot_count_exceeded = "SnapshotCountExceeded" + snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" + snapshots_present = "SnapshotsPresent" + source_condition_not_met = "SourceConditionNotMet" + system_in_use = "SystemInUse" + target_condition_not_met = "TargetConditionNotMet" + unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" + blob_being_rehydrated = "BlobBeingRehydrated" + blob_archived = "BlobArchived" + blob_not_archived = "BlobNotArchived" + + # Queue values + invalid_marker = "InvalidMarker" + message_not_found = "MessageNotFound" + message_too_large = "MessageTooLarge" + pop_receipt_mismatch = "PopReceiptMismatch" + queue_already_exists = "QueueAlreadyExists" + queue_being_deleted = "QueueBeingDeleted" + queue_disabled = "QueueDisabled" + queue_not_empty = "QueueNotEmpty" + queue_not_found = "QueueNotFound" + + # File values + cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" + client_cache_flush_delay = "ClientCacheFlushDelay" + delete_pending = "DeletePending" + directory_not_empty = "DirectoryNotEmpty" + file_lock_conflict = "FileLockConflict" + invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" + parent_not_found = "ParentNotFound" + read_only_attribute = "ReadOnlyAttribute" + share_already_exists = "ShareAlreadyExists" + share_being_deleted = "ShareBeingDeleted" + 
share_disabled = "ShareDisabled" + share_not_found = "ShareNotFound" + sharing_violation = "SharingViolation" + share_snapshot_in_progress = "ShareSnapshotInProgress" + share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" + share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" + share_has_snapshots = "ShareHasSnapshots" + container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + content_length_must_be_zero = 'ContentLengthMustBeZero' + path_already_exists = 'PathAlreadyExists' + invalid_flush_position = 'InvalidFlushPosition' + invalid_property_name = 'InvalidPropertyName' + invalid_source_uri = 'InvalidSourceUri' + unsupported_rest_version = 'UnsupportedRestVersion' + file_system_not_found = 'FilesystemNotFound' + path_not_found = 'PathNotFound' + rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' + source_path_not_found = 'SourcePathNotFound' + destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' + file_system_already_exists = 'FilesystemAlreadyExists' + file_system_being_deleted = 'FilesystemBeingDeleted' + invalid_destination_path = 'InvalidDestinationPath' + invalid_rename_source_path = 'InvalidRenameSourcePath' + invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' + lease_is_already_broken = 'LeaseIsAlreadyBroken' + lease_name_mismatch = 'LeaseNameMismatch' + path_conflict = 'PathConflict' + source_path_is_being_deleted = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self): + return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) + + def has_key(self, k): + return k in self.__dict__ + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def keys(self): + return [k for k in self.__dict__ if not k.startswith('_')] + + def values(self): + return [v for k, v in self.__dict__.items() if not k.startswith('_')] + + def items(self): + return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] + + def get(self, key, default=None): + if key in self.__dict__: + return self.__dict__[key] + return default + + +class LocationMode(object): + """ + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + """ + + PRIMARY = 'primary' #: Requests should be sent to the primary location. + SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. + + +class ResourceTypes(object): + """ + Specifies the resource types that are accessible with the account SAS. 
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g., Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                     ('c' if self.container else '') +
+                     ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and
+        container, you would provide the string "sc".
+
+        :param str string: Specify service, container, or object
+            in the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.blob.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+class AccountSasPermissions(object):
+    """
+    Permissions class to be used with the generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    the permissions found here.
+
+    :param bool read:
+        Valid for all signed resource types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resource types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable set or get tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable get blobs by tags, this should be used together with list permission.
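+
+    A usage sketch (illustrative only; it relies solely on the constructor and
+    ``from_string`` defined below):
+
+    .. code-block:: python
+
+        # "rwl" grants read, write and list; from_string round-trips it.
+        perms = AccountSasPermissions(read=True, write=True, list=True)
+        assert str(perms) == 'rwl'
+        assert str(AccountSasPermissions.from_string('rwl')) == 'rwl'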
+ """ + def __init__(self, read=False, write=False, delete=False, + list=False, # pylint: disable=redefined-builtin + add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): + self.read = read + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.list = list + self.add = add + self.create = create + self.update = update + self.process = process + self.tag = kwargs.pop('tag', False) + self.filter_by_tags = kwargs.pop('filter_by_tags', False) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('l' if self.list else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('u' if self.update else '') + + ('p' if self.process else '') + + ('f' if self.filter_by_tags else '') + + ('t' if self.tag else '') + ) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create AccountSasPermissions from a string. + + To specify read, write, delete, etc. permissions you need only to + include the first letter of the word in the string. E.g. for read and write + permissions you would provide a string "rw". + + :param str permission: Specify permissions in + the string with the first letter of the word. + :return: An AccountSasPermissions object + :rtype: ~azure.storage.filedatalake.AccountSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_list = 'l' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_update = 'u' in permission + p_process = 'p' in permission + p_tag = 't' in permission + p_filter_by_tags = 'f' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, + list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, + filter_by_tags=p_filter_by_tags) + + return parsed + + +class Services(object): + """Specifies the services accessible with the account SAS. + + :param bool blob: + Access for the `~azure.storage.blob.BlobServiceClient` + :param bool queue: + Access for the `~azure.storage.queue.QueueServiceClient` + :param bool fileshare: + Access for the `~azure.storage.fileshare.ShareServiceClient` + """ + + def __init__(self, blob=False, queue=False, fileshare=False): + self.blob = blob + self.queue = queue + self.fileshare = fileshare + self._str = (('b' if self.blob else '') + + ('q' if self.queue else '') + + ('f' if self.fileshare else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, string): + """Create Services from a string. + + To specify blob, queue, or file you need only to + include the first letter of the word in the string. E.g. for blob and queue + you would provide a string "bq". + + :param str string: Specify blob, queue, or file in + in the string with the first letter of the word. + :return: A Services object + :rtype: ~azure.storage.blob.Services + """ + res_blob = 'b' in string + res_queue = 'q' in string + res_file = 'f' in string + + parsed = cls(res_blob, res_queue, res_file) + parsed._str = string # pylint: disable = protected-access + return parsed + + +class UserDelegationKey(object): + """ + Represents a user delegation key, provided to the user by Azure Storage + based on their Azure Active Directory access token. 
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
+    """
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/parser.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/parser.py
new file mode 100644
index 0000000..c6feba8
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+    def _str(value):
+        if isinstance(value, unicode):  # pylint: disable=undefined-variable
+            return value.encode('utf-8')
+
+        return str(value)
+else:
+    _str = str
+
+
+def _to_utc_datetime(value):
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies.py
new file mode 100644
index 0000000..c9bc798
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies.py
@@ -0,0 +1,610 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+    from urllib.parse import (
+        urlparse,
+        parse_qsl,
+        urlunparse,
+        urlencode,
+    )
+except ImportError:
+    from urllib import urlencode  # type: ignore
+    from urlparse import (  # type: ignore
+        urlparse,
+        parse_qsl,
+        urlunparse,
+    )
+
+from azure.core.pipeline.policies import (
+    HeadersPolicy,
+    SansIOHTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    HTTPPolicy,
+    RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+    _unicode_type = unicode  # type: ignore
+except NameError:
+    _unicode_type = str
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, _unicode_type):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+    """Are we out of retries?"""
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+    """Is this method/status code retryable? (Based on whitelists and control
+    variables such as the number of total retries to allow, whether to
+    respect the Retry-After header, whether this header is present, and
+    whether the returned status code is on the list of status codes to
+    be retried upon in the presence of the aforementioned header)
+    """
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or a 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes above 500 with the exception of 501 Not Implemented and
+        # 505 Version Not Supported indicate a server issue and should be retried.
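+        # In practice this means that e.g. 500, 502, 503 and 504 are retried,
+        # while 501 and 505 fail immediately.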
+ if status in [501, 505]: + return False + return True + return False + + +def urljoin(base_url, stub_url): + parsed = urlparse(base_url) + parsed = parsed._replace(path=parsed.path + '/' + stub_url) + return parsed.geturl() + + +class QueueMessagePolicy(SansIOHTTPPolicy): + + def on_request(self, request): + message_id = request.context.options.pop('queue_message_id', None) + if message_id: + request.http_request.url = urljoin( + request.http_request.url, + message_id) + + +class StorageHeadersPolicy(HeadersPolicy): + request_id_header_name = 'x-ms-client-request-id' + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + super(StorageHeadersPolicy, self).on_request(request) + current_time = format_date_time(time()) + request.http_request.headers['x-ms-date'] = current_time + + custom_id = request.context.options.pop('client_request_id', None) + request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) + + # def on_response(self, request, response): + # # raise exception if the echoed client request id from the service is not identical to the one we sent + # if self.request_id_header_name in response.http_response.headers: + + # client_request_id = request.http_request.headers.get(self.request_id_header_name) + + # if response.http_response.headers[self.request_id_header_name] != client_request_id: + # raise AzureError( + # "Echoed client request ID: {} does not match sent client request ID: {}. " + # "Service request ID: {}".format( + # response.http_response.headers[self.request_id_header_name], client_request_id, + # response.http_response.headers['x-ms-request-id']), + # response=response.http_response + # ) + + +class StorageHosts(SansIOHTTPPolicy): + + def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument + self.hosts = hosts + super(StorageHosts, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + request.context.options['hosts'] = self.hosts + parsed_url = urlparse(request.http_request.url) + + # Detect what location mode we're currently requesting with + location_mode = LocationMode.PRIMARY + for key, value in self.hosts.items(): + if parsed_url.netloc == value: + location_mode = key + + # See if a specific location mode has been specified, and if so, redirect + use_location = request.context.options.pop('use_location', None) + if use_location: + # Lock retries to the specific location + request.context.options['retry_to_secondary'] = False + if use_location not in self.hosts: + raise ValueError("Attempting to use undefined host location {}".format(use_location)) + if use_location != location_mode: + # Update request URL to use the specified location + updated = parsed_url._replace(netloc=self.hosts[use_location]) + request.http_request.url = updated.geturl() + location_mode = use_location + + request.context.options['location_mode'] = location_mode + + +class StorageLoggingPolicy(NetworkTraceLoggingPolicy): + """A policy that logs HTTP request and response to the DEBUG logger. 
+
+    This accepts both global configuration and a per-request override via the
+    "logging_enable" keyword argument.
+    """
+
+    def on_request(self, request):
+        # type: (PipelineRequest, Any) -> None
+        http_request = request.http_request
+        options = request.context.options
+        if options.pop("logging_enable", self.enable_http_logger):
+            request.context["logging_enable"] = True
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                log_url = http_request.url
+                query_params = http_request.query
+                if 'sig' in query_params:
+                    # redact the signature value before logging the URL
+                    log_url = log_url.replace(query_params['sig'], "*****")
+                _LOGGER.debug("Request URL: %r", log_url)
+                _LOGGER.debug("Request method: %r", http_request.method)
+                _LOGGER.debug("Request headers:")
+                for header, value in http_request.headers.items():
+                    if header.lower() == 'authorization':
+                        value = '*****'
+                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+                        # take the url apart and scrub away the signed signature
+                        scheme, netloc, path, params, query, fragment = urlparse(value)
+                        parsed_qs = dict(parse_qsl(query))
+                        parsed_qs['sig'] = '*****'
+
+                        # the SAS needs to be put back together
+                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+                    _LOGGER.debug("    %r: %r", header, value)
+                _LOGGER.debug("Request body:")
+
+                # We don't want to log the binary data of a file upload.
+                if isinstance(http_request.body, types.GeneratorType):
+                    _LOGGER.debug("File upload")
+                else:
+                    _LOGGER.debug(str(http_request.body))
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log request: %r", err)
+
+    def on_response(self, request, response):
+        # type: (PipelineRequest, PipelineResponse, Any) -> None
+        if response.context.pop("logging_enable", self.enable_http_logger):
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                _LOGGER.debug("Response status: %r", response.http_response.status_code)
+                _LOGGER.debug("Response headers:")
+                for res_header, value in response.http_response.headers.items():
+                    _LOGGER.debug("    %r: %r", res_header, value)
+
+                # We don't want to log binary data if the response is a file.
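+                # The checks below inspect Content-Disposition and Content-Type
+                # so that only textual bodies are ever written to the log.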
+ _LOGGER.debug("Response content:") + pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) + header = response.http_response.headers.get('content-disposition') + + if header and pattern.match(header): + filename = header.partition('=')[2] + _LOGGER.debug("File attachments: %s", filename) + elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): + _LOGGER.debug("Body contains binary data.") + elif response.http_response.headers.get("content-type", "").startswith("image"): + _LOGGER.debug("Body contains image data.") + else: + if response.context.options.get('stream', False): + _LOGGER.debug("Body is streamable") + else: + _LOGGER.debug(response.http_response.text()) + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log response: %s", repr(err)) + + +class StorageRequestHook(SansIOHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._request_callback = kwargs.get('raw_request_hook') + super(StorageRequestHook, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, **Any) -> PipelineResponse + request_callback = request.context.options.pop('raw_request_hook', self._request_callback) + if request_callback: + request_callback(request) + + +class StorageResponseHook(HTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(StorageResponseHook, self).__init__() + + def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = self.next.send(request) + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + response_callback(response) + request.context['response_callback'] = response_callback + return response + + +class StorageContentValidation(SansIOHTTPPolicy): + """A simple policy that sends the given headers + with the request. + + This will overwrite any headers already defined in the request. 
+ """ + header_name = 'Content-MD5' + + def __init__(self, **kwargs): # pylint: disable=unused-argument + super(StorageContentValidation, self).__init__() + + @staticmethod + def get_content_md5(data): + md5 = hashlib.md5() # nosec + if isinstance(data, bytes): + md5.update(data) + elif hasattr(data, 'read'): + pos = 0 + try: + pos = data.tell() + except: # pylint: disable=bare-except + pass + for chunk in iter(lambda: data.read(4096), b""): + md5.update(chunk) + try: + data.seek(pos, SEEK_SET) + except (AttributeError, IOError): + raise ValueError("Data should be bytes or a seekable file-like object.") + else: + raise ValueError("Data should be bytes or a seekable file-like object.") + + return md5.digest() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + validate_content = request.context.options.pop('validate_content', False) + if validate_content and request.http_request.method != 'GET': + computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) + request.http_request.headers[self.header_name] = computed_md5 + request.context['validate_content_md5'] = computed_md5 + request.context['validate_content'] = validate_content + + def on_response(self, request, response): + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = request.context.get('validate_content_md5') or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + raise AzureError( + 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( + response.http_response.headers['content-md5'], computed_md5), + response=response.http_response + ) + + +class StorageRetryPolicy(HTTPPolicy): + """ + The base class for Exponential and Linear retries containing shared code. + """ + + def __init__(self, **kwargs): + self.total_retries = kwargs.pop('retry_total', 10) + self.connect_retries = kwargs.pop('retry_connect', 3) + self.read_retries = kwargs.pop('retry_read', 3) + self.status_retries = kwargs.pop('retry_status', 3) + self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) + super(StorageRetryPolicy, self).__init__() + + def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use + """ + A function which sets the next host location on the request, if applicable. + + :param ~azure.storage.models.RetryContext context: + The retry context containing the previous host location and the request + to evaluate and possibly modify. 
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None): + """Increment the retry counters. + + :param response: A pipeline response object. + :param error: An error encountered during the request, or + None if the response was received successfully. + + :return: Whether the retry attempts are exhausted. + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, then retry will not work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if the body is not seekable, then retry will not work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
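+
+        A quick sketch of the resulting schedule (illustrative only; jitter is
+        ignored here):
+
+        .. code-block:: python
+
+            # backoff for attempt n is initial_backoff + increment_base ** n (n >= 1)
+            [15 + (0 if n == 0 else 3 ** n) for n in range(4)]  # [15, 18, 24, 42]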
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies_async.py
new file mode 100644
index 0000000..e0926b8
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import asyncio
+import random
+import logging
+from typing import Any, TYPE_CHECKING
+
+from azure.core.pipeline.policies import AsyncHTTPPolicy
+from azure.core.exceptions import AzureError
+
+from .policies import is_retry, StorageRetryPolicy
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+async def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        # use iscoroutinefunction so async hooks are actually awaited
+        if asyncio.iscoroutinefunction(settings['hook']):
+            await settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+        else:
+            settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+
+
+class AsyncStorageResponseHook(AsyncHTTPPolicy):
+
+    def __init__(self, **kwargs):  # pylint: disable=unused-argument
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(AsyncStorageResponseHook, self).__init__()
+
+    async def send(self, request):
+        # type: (PipelineRequest) -> PipelineResponse
+        data_stream_total = request.context.get('data_stream_total') or \
+            request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current') or \
+            request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current') or \
+            request.context.options.pop('upload_stream_current', None)
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = await self.next.send(request)
+        await response.http_response.load_body()
+
+        will_retry = is_retry(response, request.context.options.get('mode'))
+        if not will_retry and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif not will_retry and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            pipeline_obj.context['data_stream_total'] = data_stream_total
+            pipeline_obj.context['download_stream_current'] = download_stream_current
+            pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            if asyncio.iscoroutinefunction(response_callback):
+                await response_callback(response)
+            else:
+                response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class AsyncStorageRetryPolicy(StorageRetryPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+ """ + + async def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + await transport.sleep(backoff) + + async def send(self, request): + retries_remaining = True + response = None + retry_settings = self.configure_retries(request) + while retries_remaining: + try: + response = await self.next.send(request) + if is_retry(response, retry_settings['mode']): + retries_remaining = self.increment( + retry_settings, + request=request.http_request, + response=response.http_response) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=response.http_response, + error=None) + await self.sleep(retry_settings, request.context.transport) + continue + break + except AzureError as err: + retries_remaining = self.increment( + retry_settings, request=request.http_request, error=err) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=None, + error=err) + await self.sleep(retry_settings, request.context.transport) + continue + raise err + if retry_settings['history']: + response.context['history'] = retry_settings['history'] + response.http_response.location_mode = retry_settings['mode'] + return response + + +class ExponentialRetry(AsyncStorageRetryPolicy): + """Exponential retry.""" + + def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, + retry_to_secondary=False, random_jitter_range=3, **kwargs): + ''' + Constructs an Exponential retry object. The initial_backoff is used for + the first retry. Subsequent retries are retried after initial_backoff + + increment_power^retry_count seconds. For example, by default the first retry + occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the + third after (15+3^2) = 24 seconds. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + ''' + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + super(ExponentialRetry, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings): + """ + Calculates how long to sleep before retrying. + + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. 
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/request_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/request_handlers.py
new file mode 100644
index 0000000..4f15b65
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/request_handlers.py
@@ -0,0 +1,147 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
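+
+    For example (an illustrative sketch, not from the original module):
+
+    .. code-block:: python
+
+        from datetime import datetime
+        serialize_iso(datetime(2020, 10, 12, 13, 43, 8))  # '2020-10-12T13:43:08Z'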
+ """ + if not attr: + return None + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, + utc.tm_hour, utc.tm_min, utc.tm_sec) + return date + 'Z' + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(ValueError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + +def get_length(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: # pylint: disable=bare-except + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + try: + return fstat(fileno).st_size + except OSError: + # Not a valid fileno, may be possible requests returned + # a socket number? + pass + + # If the stream is seekable and tell() is implemented, calculate the stream size. + try: + current_position = data.tell() + data.seek(0, SEEK_END) + length = data.tell() - current_position + data.seek(current_position, SEEK_SET) + except (AttributeError, UnsupportedOperation): + pass + + return length + + +def read_length(data): + try: + if hasattr(data, 'read'): + read_data = b'' + for chunk in iter(lambda: data.read(4096), b""): + read_data += chunk + return len(read_data), read_data + if hasattr(data, '__iter__'): + read_data = b'' + for chunk in data: + read_data += chunk + return len(read_data), read_data + except: # pylint: disable=bare-except + pass + raise ValueError("Unable to calculate content length, please specify.") + + +def validate_and_format_range_headers( + start_range, end_range, start_range_required=True, + end_range_required=True, check_content_md5=False, align_to_page=False): + # If end range is provided, start range must be provided + if (start_range_required or end_range is not None) and start_range is None: + raise ValueError("start_range value cannot be None.") + if end_range_required and end_range is None: + raise ValueError("end_range value cannot be None.") + + # Page ranges must be 512 aligned + if align_to_page: + if start_range is not None and start_range % 512 != 0: + raise ValueError("Invalid page blob start_range: {0}. " + "The size must be aligned to a 512-byte boundary.".format(start_range)) + if end_range is not None and end_range % 512 != 511: + raise ValueError("Invalid page blob end_range: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(end_range)) + + # Format based on whether end_range is present + range_header = None + if end_range is not None: + range_header = 'bytes={0}-{1}'.format(start_range, end_range) + elif start_range is not None: + range_header = "bytes={0}-".format(start_range) + + # Content MD5 can only be provided for a complete range less than 4MB in size + range_validation = None + if check_content_md5: + if start_range is None or end_range is None: + raise ValueError("Both start and end range requied for MD5 content validation.") + if end_range - start_range > 4 * 1024 * 1024: + raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") + range_validation = 'true' + + return range_header, range_validation + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> Dict[str, str] + headers = {} + if metadata: + for key, value in metadata.items(): + headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value + return headers diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/response_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/response_handlers.py new file mode 100644 index 0000000..ac526e5 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/response_handlers.py @@ -0,0 +1,159 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging + +from azure.core.pipeline.policies import ContentDecodePolicy +from azure.core.exceptions import ( + HttpResponseError, + ResourceNotFoundError, + ResourceModifiedError, + ResourceExistsError, + ClientAuthenticationError, + DecodeError) + +from .parser import _to_utc_datetime +from .models import StorageErrorCode, UserDelegationKey, get_enum_value + + +if TYPE_CHECKING: + from datetime import datetime + from azure.core.exceptions import AzureError + + +_LOGGER = logging.getLogger(__name__) + + +class PartialBatchErrorException(HttpResponseError): + """There is a partial failure in batch operations. + + :param str message: The message of the exception. + :param response: Server response to be deserialized. + :param list parts: A list of the parts in multipart response. 
+ """ + + def __init__(self, message, response, parts): + self.parts = parts + super(PartialBatchErrorException, self).__init__(message=message, response=response) + + +def parse_length_from_content_range(content_range): + ''' + Parses the blob length from the content range header: bytes 1-3/65537 + ''' + if content_range is None: + return None + + # First, split in space and take the second half: '1-3/65537' + # Next, split on slash and take the second half: '65537' + # Finally, convert to an int: 65537 + return int(content_range.split(' ', 1)[1].split('/', 1)[1]) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = get_enum_value(value) + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + +def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers), deserialized + + +def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return response.location_mode, deserialized + + +def process_storage_error(storage_error): + raise_error = HttpResponseError + error_code = storage_error.response.headers.get('x-ms-error-code') + error_message = storage_error.message + additional_data = {} + try: + error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) + if error_body: + for info in error_body.iter(): + if info.tag.lower() == 'code': + error_code = info.text + elif info.tag.lower() == 'message': + error_message = info.text + else: + additional_data[info.tag] = info.text + except DecodeError: + pass + + try: + if error_code: + error_code = StorageErrorCode(error_code) + if error_code in [StorageErrorCode.condition_not_met, + StorageErrorCode.blob_overwritten]: + raise_error = ResourceModifiedError + if error_code in [StorageErrorCode.invalid_authentication_info, + StorageErrorCode.authentication_failed]: + raise_error = ClientAuthenticationError + if error_code in [StorageErrorCode.resource_not_found, + StorageErrorCode.cannot_verify_copy_source, + StorageErrorCode.blob_not_found, + StorageErrorCode.queue_not_found, + StorageErrorCode.container_not_found, + StorageErrorCode.parent_not_found, + StorageErrorCode.share_not_found]: + raise_error = ResourceNotFoundError + if error_code in [StorageErrorCode.account_already_exists, + StorageErrorCode.account_being_created, + StorageErrorCode.resource_already_exists, + StorageErrorCode.resource_type_mismatch, + StorageErrorCode.blob_already_exists, + StorageErrorCode.queue_already_exists, + StorageErrorCode.container_already_exists, + StorageErrorCode.container_being_deleted, + StorageErrorCode.queue_being_deleted, + StorageErrorCode.share_already_exists, + StorageErrorCode.share_being_deleted]: + raise_error = ResourceExistsError + except ValueError: + # Got an unknown error code + pass + + try: + error_message += "\nErrorCode:{}".format(error_code.value) + except AttributeError: + error_message += "\nErrorCode:{}".format(error_code) + for name, info in additional_data.items(): + error_message 
+= "\n{}:{}".format(name, info) + + error = raise_error(message=error_message, response=storage_error.response) + error.error_code = error_code + error.additional_info = additional_data + raise error + + +def parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/shared_access_signature.py new file mode 100644 index 0000000..07aad5f --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/shared_access_signature.py @@ -0,0 +1,220 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . 
import sign_string, url_quote + + +class QueryStringConstants(object): + SIGNED_SIGNATURE = 'sig' + SIGNED_PERMISSION = 'sp' + SIGNED_START = 'st' + SIGNED_EXPIRY = 'se' + SIGNED_RESOURCE = 'sr' + SIGNED_IDENTIFIER = 'si' + SIGNED_IP = 'sip' + SIGNED_PROTOCOL = 'spr' + SIGNED_VERSION = 'sv' + SIGNED_CACHE_CONTROL = 'rscc' + SIGNED_CONTENT_DISPOSITION = 'rscd' + SIGNED_CONTENT_ENCODING = 'rsce' + SIGNED_CONTENT_LANGUAGE = 'rscl' + SIGNED_CONTENT_TYPE = 'rsct' + START_PK = 'spk' + START_RK = 'srk' + END_PK = 'epk' + END_RK = 'erk' + SIGNED_RESOURCE_TYPES = 'srt' + SIGNED_SERVICES = 'ss' + SIGNED_OID = 'skoid' + SIGNED_TID = 'sktid' + SIGNED_KEY_START = 'skt' + SIGNED_KEY_EXPIRY = 'ske' + SIGNED_KEY_SERVICE = 'sks' + SIGNED_KEY_VERSION = 'skv' + + # for ADLS + SIGNED_AUTHORIZED_OID = 'saoid' + SIGNED_UNAUTHORIZED_OID = 'suoid' + SIGNED_CORRELATION_ID = 'scid' + SIGNED_DIRECTORY_DEPTH = 'sdd' + + @staticmethod + def to_list(): + return [ + QueryStringConstants.SIGNED_SIGNATURE, + QueryStringConstants.SIGNED_PERMISSION, + QueryStringConstants.SIGNED_START, + QueryStringConstants.SIGNED_EXPIRY, + QueryStringConstants.SIGNED_RESOURCE, + QueryStringConstants.SIGNED_IDENTIFIER, + QueryStringConstants.SIGNED_IP, + QueryStringConstants.SIGNED_PROTOCOL, + QueryStringConstants.SIGNED_VERSION, + QueryStringConstants.SIGNED_CACHE_CONTROL, + QueryStringConstants.SIGNED_CONTENT_DISPOSITION, + QueryStringConstants.SIGNED_CONTENT_ENCODING, + QueryStringConstants.SIGNED_CONTENT_LANGUAGE, + QueryStringConstants.SIGNED_CONTENT_TYPE, + QueryStringConstants.START_PK, + QueryStringConstants.START_RK, + QueryStringConstants.END_PK, + QueryStringConstants.END_RK, + QueryStringConstants.SIGNED_RESOURCE_TYPES, + QueryStringConstants.SIGNED_SERVICES, + QueryStringConstants.SIGNED_OID, + QueryStringConstants.SIGNED_TID, + QueryStringConstants.SIGNED_KEY_START, + QueryStringConstants.SIGNED_KEY_EXPIRY, + QueryStringConstants.SIGNED_KEY_SERVICE, + QueryStringConstants.SIGNED_KEY_VERSION, + # for ADLS + QueryStringConstants.SIGNED_AUTHORIZED_OID, + QueryStringConstants.SIGNED_UNAUTHORIZED_OID, + QueryStringConstants.SIGNED_CORRELATION_ID, + QueryStringConstants.SIGNED_DIRECTORY_DEPTH, + ] + + +class SharedAccessSignature(object): + ''' + Provides a factory for creating account shared access + signature tokens with an account name and account key. Users can either + use the factory or construct the appropriate service and use the + generate_*_shared_access_signature method directly. + ''' + + def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): + ''' + :param str account_name: + The storage account name used to generate the shared access signatures. + :param str account_key: + The access key to generate the shared access signatures. + :param str x_ms_version: + The service version used to generate the shared access signatures. + ''' + self.account_name = account_name + self.account_key = account_key + self.x_ms_version = x_ms_version + + def generate_account(self, services, resource_types, permission, expiry, start=None, + ip=None, protocol=None): + ''' + Generates a shared access signature for the account. + Use the returned signature with the sas_token parameter of the service + or to create a new account object. + + :param services: + Specifies the services accessible with the account SAS. + :param ResourceTypes resource_types: + Specifies the resource types that are accessible with the account + SAS. You can combine values to provide access to more than one + resource type. + :param AccountSasPermissions permission: + The permissions associated with the shared access signature.
The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. You can combine + values to provide more than one permission. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, 
content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION)) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads.py new file mode 100644 index 0000000..29949d5 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads.py @@ -0,0 +1,568 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from concurrent import futures +from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) +from threading import Lock +from itertools import islice +from math import ceil + +import six + +from azure.core.tracing.common import with_current_context + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
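+# The helpers below drive chunked uploads through a bounded-concurrency
+# sliding window: at most max_concurrency chunks are in flight, and each
+# completed future is replaced by the next pending chunk. A minimal sketch of
+# the same pattern, assuming an arbitrary do_upload(chunk) callable and an
+# iterable of chunks (both illustrative, not part of this module):
+#
+#     from concurrent import futures
+#     from itertools import islice
+#
+#     def upload_all(do_upload, chunks, max_concurrency=4):
+#         with futures.ThreadPoolExecutor(max_concurrency) as executor:
+#             pending = iter(chunks)
+#             # prime the window with the first max_concurrency chunks
+#             running = {executor.submit(do_upload, c)
+#                        for c in islice(pending, max_concurrency)}
+#             results = []
+#             while running:
+#                 done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+#                 results.extend(f.result() for f in done)
+#                 for _ in done:  # refill the window, one task per completion
+#                     try:
+#                         running.add(executor.submit(do_upload, next(pending)))
+#                     except StopIteration:
+#                         break
+#             return results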
+ + +def _parallel_uploads(executor, uploader, pending, running): + range_ids = [] + while True: + # Wait for some upload to finish before adding a new one + done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(executor.submit(with_current_context(uploader), next_chunk)) + except StopIteration: + break + + # Wait for the remaining uploads to finish + done, _running = futures.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + validate_content=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + validate_content=validate_content, + **kwargs) + if parallel: + executor = futures.ThreadPoolExecutor(max_concurrency) + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + executor.submit(with_current_context(uploader.process_chunk), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + executor = futures.ThreadPoolExecutor(max_concurrency) + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + executor.submit(with_current_context(uploader.process_substream_block), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] + return sorted(range_ids) + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = Lock() if parallel else None + + # Progress feedback +
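+        # (progress_total is updated from multiple worker threads when
+        # parallel=True, so _update_progress takes progress_lock before
+        # mutating it.)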
self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def _upload_substream_block(self, block_id, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, block_id, block_stream): + range_id = self._upload_substream_block(block_id, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. 
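+        # The service requires block IDs within a blob to be base64 strings of
+        # equal length, so the chunk offset is zero-padded to a fixed 32 digits
+        # before encoding; the nested encode_base64/url_quote round-trip is the
+        # questionable part flagged by the TODO above.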
+ index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return index, block_id + + def _upload_substream_block(self, block_id, block_stream): + try: + self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + return not any(bytearray(chunk_data)) + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = self.service.upload_pages( + chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + self.current_length = int(self.response_headers["blob_append_offset"]) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + # unlike page blobs, Data Lake appends every chunk at its offset (empty chunks are not skipped) + self.response_headers = self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = self.service.upload_range( + chunk_data, + chunk_offset,
+ length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response + + +class SubStream(IOBase): + + def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): + # Python 2.7: file-like objects created with open() typically support seek(), but are not + # derivations of io.IOBase and thus do not implement seekable(). + # Python > 3.0: file-like objects created with open() are derived from io.IOBase. + try: + # only the main thread runs this, so there's no need to grab the lock + wrapped_stream.seek(0, SEEK_CUR) + except Exception: + raise ValueError("Wrapped stream must support seek().") + + self._lock = lockObj + self._wrapped_stream = wrapped_stream + self._position = 0 + self._stream_begin_index = stream_begin_index + self._length = length + self._buffer = BytesIO() + + # we must avoid buffering more than necessary, and also not use up too much memory + # so the max buffer size is capped at 4MB + self._max_buffer_size = ( + length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE + ) + self._current_buffer_start = 0 + self._current_buffer_size = 0 + super(SubStream, self).__init__() + + def __len__(self): + return self._length + + def close(self): + if self._buffer: + self._buffer.close() + self._wrapped_stream = None + IOBase.close(self) + + def fileno(self): + return self._wrapped_stream.fileno() + + def flush(self): + pass + + def read(self, size=None): + if self.closed: # pylint: disable=using-constant-test + raise ValueError("Stream is closed.") + + if size is None: + size = self._length - self._position + + # adjust if out of bounds + if size + self._position >= self._length: + size = self._length - self._position + + # return fast + if size == 0 or self._buffer.closed: + return b"" + + # attempt first read from the read buffer and update position + read_buffer = self._buffer.read(size) + bytes_read = len(read_buffer) + bytes_remaining = size - bytes_read + self._position += bytes_read + + # repopulate the read buffer from the underlying stream to fulfill the request + # ensure the seek and read operations are done atomically (only if a lock is provided) + if bytes_remaining > 0: + with self._buffer: + # either read in the max buffer size specified on the class + # or read in just enough data for the current block/sub stream + current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) + + # lock is only defined if max_concurrency > 1 (parallel uploads) + if self._lock: + with self._lock: + # reposition the underlying stream to match the start of the data to read + absolute_position = self._stream_begin_index + self._position + self._wrapped_stream.seek(absolute_position, SEEK_SET) + # If we can't seek to the right location, our read will be corrupted so fail fast.
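+                        # (seek() on Python 2 file objects returns None, so the
+                        # new position has to be verified with tell() instead.)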
+ if self._wrapped_stream.tell() != absolute_position: + raise IOError("Stream failed to seek to the desired location.") + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + else: + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + + if buffer_from_stream: + # update the buffer with new data from the wrapped stream + # we need to note down the start position and size of the buffer, in case seek is performed later + self._buffer = BytesIO(buffer_from_stream) + self._current_buffer_start = self._position + self._current_buffer_size = len(buffer_from_stream) + + # read the remaining bytes from the new buffer and update position + second_read_buffer = self._buffer.read(bytes_remaining) + read_buffer += second_read_buffer + self._position += len(second_read_buffer) + + return read_buffer + + def readable(self): + return True + + def readinto(self, b): + raise UnsupportedOperation + + def seek(self, offset, whence=0): + if whence == SEEK_SET: + start_index = 0 + elif whence == SEEK_CUR: + start_index = self._position + elif whence == SEEK_END: + start_index = self._length + offset = -offset + else: + raise ValueError("Invalid argument for the 'whence' parameter.") + + pos = start_index + offset + + if pos > self._length: + pos = self._length + elif pos < 0: + pos = 0 + + # check if buffer is still valid + # if not, drop buffer + if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: + self._buffer.close() + self._buffer = BytesIO() + else: # if yes seek to correct position + delta = pos - self._current_buffer_start + self._buffer.seek(delta, SEEK_SET) + + self._position = pos + return pos + + def seekable(self): + return True + + def tell(self): + return self._position + + def write(self, *args): + raise UnsupportedOperation + + def writelines(self, *args): + raise UnsupportedOperation + + def writable(self): + return False + + +class IterStreamer(object): + """ + File-like streaming iterator. + """ + + def __init__(self, generator, encoding="UTF-8"): + self.generator = generator + self.iterator = iter(generator) + self.leftover = b"" + self.encoding = encoding + + def __len__(self): + return self.generator.__len__() + + def __iter__(self): + return self.iterator + + def seekable(self): + return False + + def __next__(self): + return next(self.iterator) + + next = __next__ # Python 2 compatibility. + + def tell(self, *args, **kwargs): + raise UnsupportedOperation("Data generator does not support tell.") + + def seek(self, *args, **kwargs): + raise UnsupportedOperation("Data generator is unseekable.") + + def read(self, size): + data = self.leftover + count = len(self.leftover) + try: + while count < size: + chunk = self.__next__() + if isinstance(chunk, six.text_type): + chunk = chunk.encode(self.encoding) + data += chunk + count += len(chunk) + except StopIteration: + pass + + # retain any overflow; when the iterator is exhausted this leaves b"" + # so the tail is not returned twice on the next read() + self.leftover = data[size:] + + return data[:size] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads_async.py new file mode 100644 index 0000000..29c0ee4 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads_async.py @@ -0,0 +1,367 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +import asyncio +from asyncio import Lock +from itertools import islice +import threading + +from math import ceil + +import six + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder +from .uploads import SubStream, IterStreamer # pylint: disable=unused-import + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' + + +async def _parallel_uploads(uploader, pending, running): + range_ids = [] + while True: + # Wait for some upload to finish before adding a new one + done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(asyncio.ensure_future(uploader(next_chunk))) + except StopIteration: + break + + # Wait for the remaining uploads to finish + if running: + done, _running = await asyncio.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +async def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + asyncio.ensure_future(uploader.process_chunk(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [] + for chunk in uploader.get_chunk_streams(): + range_ids.append(await uploader.process_chunk(chunk)) + + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +async def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + asyncio.ensure_future(uploader.process_substream_block(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [] + for block in uploader.get_substream_blocks(): + range_ids.append(await
uploader.process_substream_block(block)) + return sorted(range_ids) + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, block_id, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def 
_upload_substream_block_with_progress(self, block_id, block_stream): + range_id = await self._upload_substream_block(block_id, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, block_id, block_stream): + try: + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = await self.service.upload_pages( + chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, 
chunk_data): + self.response_headers = await self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = await self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + return range_id, response diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared_access_signature.py new file mode 100644 index 0000000..b442bac --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared_access_signature.py @@ -0,0 +1,391 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING + +from azure.multiapi.storagev2.blob.v2019_07_07 import generate_account_sas as generate_blob_account_sas +from azure.multiapi.storagev2.blob.v2019_07_07 import generate_container_sas, generate_blob_sas +if TYPE_CHECKING: + import datetime + from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \ + UserDelegationKey + + +def generate_account_sas( + account_name, # type: str + account_key, # type: str + resource_types, # type: Union[ResourceTypes, str] + permission, # type: Union[AccountSasPermissions, str] + expiry, # type: Optional[Union[datetime, str]] + **kwargs # type: Any + ): # type: (...) -> str + """Generates a shared access signature for the DataLake service. + + Use the returned signature as the credential parameter of any DataLakeServiceClient, + FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str account_key: + The access key to generate the shared access signature. + :param resource_types: + Specifies the resource types that are accessible with the account SAS. + :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field.
This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :keyword start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :paramtype start: ~datetime.datetime or str + :keyword str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + return generate_blob_account_sas( + account_name=account_name, + account_key=account_key, + resource_types=resource_types, + permission=permission, + expiry=expiry, + **kwargs + ) + + +def generate_file_system_sas( + account_name, # type: str + file_system_name, # type: str + credential, # type: Union[str, UserDelegationKey] + permission=None, # type: Optional[Union[FileSystemSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for a file system. + + Use the returned signature with the credential parameter of any DataLakeServiceClient, + FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str file_system_name: + The name of the file system. + :param str credential: + The credential can be either an account key or a user delegation key. + If an account key is used as the credential, then the credential should be a str. + Instead of an account key, the user could also pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished + by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :type credential: str or ~azure.storage.filedatalake.UserDelegationKey + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC.
If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :keyword start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :paramtype start: datetime or str + :keyword str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str preauthorized_agent_object_id: + The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform + the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the + user delegation key has the required permissions before granting access but no additional permission check for + the agent object id will be performed. + :keyword str agent_object_id: + The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to + perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner + of the user delegation key has the required permissions before granting access and the service will perform an + additional POSIX ACL check to determine if this user is authorized to perform the requested operation. + :keyword str correlation_id: + The correlation id to correlate the storage audit logs with the audit logs used by the principal + generating and distributing the SAS. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + return generate_container_sas( + account_name=account_name, + container_name=file_system_name, + account_key=credential if isinstance(credential, str) else None, + user_delegation_key=credential if not isinstance(credential, str) else None, + permission=permission, + expiry=expiry, + **kwargs) + + +def generate_directory_sas( + account_name, # type: str + file_system_name, # type: str + directory_name, # type: str + credential, # type: Union[str, UserDelegationKey] + permission=None, # type: Optional[Union[FileSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for a directory. 
+ + Use the returned signature with the credential parameter of any DataLakeServiceClient, + FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str file_system_name: + The name of the file system. + :param str directory_name: + The name of the directory. + :param str credential: + The credential can be either an account key or a user delegation key. + If an account key is used as the credential, then the credential should be a str. + Instead of an account key, the user could also pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished + by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :type credential: str or ~azure.storage.filedatalake.UserDelegationKey + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.filedatalake.FileSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :keyword start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :paramtype start: ~datetime.datetime or str + :keyword str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature.
+ :keyword str preauthorized_agent_object_id: + The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform + the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the + user delegation key has the required permissions before granting access but no additional permission check for + the agent object id will be performed. + :keyword str agent_object_id: + The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to + perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner + of the user delegation key has the required permissions before granting access and the service will perform an + additional POSIX ACL check to determine if this user is authorized to perform the requested operation. + :keyword str correlation_id: + The correlation id to correlate the storage audit logs with the audit logs used by the principal + generating and distributing the SAS. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + depth = len(directory_name.strip("/").split("/")) + return generate_blob_sas( + account_name=account_name, + container_name=file_system_name, + blob_name=directory_name, + account_key=credential if isinstance(credential, str) else None, + user_delegation_key=credential if not isinstance(credential, str) else None, + permission=permission, + expiry=expiry, + sdd=depth, + is_directory=True, + **kwargs) + + +def generate_file_sas( + account_name, # type: str + file_system_name, # type: str + directory_name, # type: str + file_name, # type: str + credential, # type: Union[str, UserDelegationKey] + permission=None, # type: Optional[Union[FileSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for a file. + + Use the returned signature with the credential parameter of any DataLakeServiceClient, + FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str file_system_name: + The name of the file system. + :param str directory_name: + The name of the directory. + :param str file_name: + The name of the file. + :param str credential: + The credential can be either an account key or a user delegation key. + If an account key is used as the credential, then the credential should be a str. + Instead of an account key, the user could also pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished + by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :type credential: str or ~azure.storage.filedatalake.UserDelegationKey + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.filedatalake.FileSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :keyword start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :paramtype start: ~datetime.datetime or str + :keyword str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str preauthorized_agent_object_id: + The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform + the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the + user delegation key has the required permissions before granting access but no additional permission check for + the agent object id will be performed. + :keyword str agent_object_id: + The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to + perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner + of the user delegation key has the required permissions before granting access and the service will perform an + additional POSIX ACL check to determine if this user is authorized to perform the requested operation. + :keyword str correlation_id: + The correlation id to correlate the storage audit logs with the audit logs used by the principal + generating and distributing the SAS. This can only be used when generating a SAS with a user delegation key. + :return: A Shared Access Signature (sas) token.
+ :rtype: str + """ + if directory_name: + path = directory_name.rstrip('/') + "/" + file_name + else: + path = file_name + return generate_blob_sas( + account_name=account_name, + container_name=file_system_name, + blob_name=path, + account_key=credential if isinstance(credential, str) else None, + user_delegation_key=credential if not isinstance(credential, str) else None, + permission=permission, + expiry=expiry, + **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_upload_helper.py new file mode 100644 index 0000000..d1a98dd --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_upload_helper.py @@ -0,0 +1,87 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from ._deserialize import ( + process_storage_error) +from ._generated.models import ( + StorageErrorException, +) +from ._shared.response_handlers import return_response_headers +from ._shared.uploads import ( + upload_data_chunks, + DataLakeFileChunkUploader) + + +def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument + return any([ + modified_access_conditions.if_modified_since, + modified_access_conditions.if_unmodified_since, + modified_access_conditions.if_none_match, + modified_access_conditions.if_match + ]) + + +def upload_datalake_file( # pylint: disable=unused-argument + client=None, + stream=None, + length=None, + overwrite=None, + validate_content=None, + max_concurrency=None, + **kwargs): + try: + if length == 0: + return {} + properties = kwargs.pop('properties', None) + umask = kwargs.pop('umask', None) + permissions = kwargs.pop('permissions', None) + path_http_headers = kwargs.pop('path_http_headers', None) + modified_access_conditions = kwargs.pop('modified_access_conditions', None) + chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) + + if not overwrite: + # if customers didn't specify access conditions, they cannot flush data to existing file + if not _any_conditions(modified_access_conditions): + modified_access_conditions.if_none_match = '*' + if properties or umask or permissions: + raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") + + if overwrite: + response = client.create( + resource='file', + path_http_headers=path_http_headers, + properties=properties, + modified_access_conditions=modified_access_conditions, + umask=umask, + permissions=permissions, + cls=return_response_headers, + **kwargs) + + # this modified_access_conditions will be applied to flush_data to make sure + # no other flush between create and the current flush + modified_access_conditions.if_match = response['etag'] + modified_access_conditions.if_none_match = None + modified_access_conditions.if_modified_since = None + modified_access_conditions.if_unmodified_since = None + + upload_data_chunks( + service=client, + uploader_class=DataLakeFileChunkUploader, + total_size=length, + chunk_size=chunk_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + **kwargs) + + return client.flush_data(position=length, + path_http_headers=path_http_headers, + modified_access_conditions=modified_access_conditions, + 
cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_version.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_version.py new file mode 100644 index 0000000..dd22d87 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_version.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +VERSION = "12.2.0b1" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/__init__.py new file mode 100644 index 0000000..c24dde8 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/__init__.py @@ -0,0 +1,24 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from ._download_async import StorageStreamDownloader +from .._shared.policies_async import ExponentialRetry, LinearRetry +from ._data_lake_file_client_async import DataLakeFileClient +from ._data_lake_directory_client_async import DataLakeDirectoryClient +from ._file_system_client_async import FileSystemClient +from ._data_lake_service_client_async import DataLakeServiceClient +from ._data_lake_lease_async import DataLakeLeaseClient + +__all__ = [ + 'DataLakeServiceClient', + 'FileSystemClient', + 'DataLakeDirectoryClient', + 'DataLakeFileClient', + 'DataLakeLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'StorageStreamDownloader' +] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_directory_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_directory_client_async.py new file mode 100644 index 0000000..c09c3f3 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_directory_client_async.py @@ -0,0 +1,526 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore +from ._data_lake_file_client_async import DataLakeFileClient +from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase +from .._models import DirectoryProperties +from .._deserialize import deserialize_dir_properties +from ._path_client_async import PathClient + + +class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase): + """A client to interact with the DataLake directory, even if the directory may not yet exist. + + For operations relating to a specific subdirectory or file under the directory, a directory client or file client + can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. 
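As a hedged sketch of how the upload helper above behaves when reached through the public client (endpoint, names, and SAS token below are assumptions): with overwrite=False the helper pins the commit to if_none_match='*', so it fails if the file already exists; with overwrite=True it recreates the file and pins the final flush_data to the etag returned by create, so a concurrent writer between create and flush fails loudly instead of interleaving.

    from azure.storage.filedatalake import DataLakeFileClient

    # Assumed endpoint, file system, path, and SAS token.
    file_client = DataLakeFileClient(
        "https://myaccount.dfs.core.windows.net", "my-file-system",
        "folder/data.txt", credential="<sas-token>")
    # Fails on commit if folder/data.txt already exists (if_none_match='*').
    file_client.upload_data(b"hello world", overwrite=False)
    # Recreates the file; the final flush is etag-pinned to the create response.
    file_client.upload_data(b"hello again", overwrite=True)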
+
+ :ivar str url:
+ The full endpoint URL to the file system, including SAS token if used.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URI to the storage account.
+ :param file_system_name:
+ The file system for the directory or files.
+ :type file_system_name: str
+ :param directory_name:
+ The whole path of the directory. eg. {directory under file system}/{directory to interact with}
+ :type directory_name: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py
+ :start-after: [START instantiate_directory_client_from_conn_str]
+ :end-before: [END instantiate_directory_client_from_conn_str]
+ :language: python
+ :dedent: 4
+ :caption: Creating the DataLakeDirectoryClient from connection string.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ directory_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call
+ credential=credential, **kwargs)
+
+ async def create_directory(self, metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Create a new directory.
+
+ :param metadata:
+ Name-value pairs associated with the directory as metadata.
+ :type metadata: dict(str, str)
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword lease:
+ Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
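The umask rule quoted above ("p & ^u") is ordinary bitwise arithmetic; a quick check of the docstring's own example, written in Python octal notation:

    # Docstring example: permission p = 0777 with umask u = 0057 yields 0720.
    p, u = 0o777, 0o057
    assert p & ~u == 0o720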
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: response dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory_async.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 8 + :caption: Create directory. + """ + return await self._create('directory', metadata=metadata, **kwargs) + + async def delete_directory(self, **kwargs): + # type: (...) -> None + """ + Marks the specified directory for deletion. + + :keyword lease: + Required if the directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory_async.py + :start-after: [START delete_directory] + :end-before: [END delete_directory] + :language: python + :dedent: 4 + :caption: Delete directory. + """ + return await self._delete(**kwargs) + + async def get_directory_properties(self, **kwargs): + # type: (**Any) -> DirectoryProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the directory. It does not return the content of the directory. + + :keyword lease: + Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. 
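A minimal async sketch of create_directory and delete_directory as documented above. The connection string and names are assumptions, and from_connection_string is assumed to be available from the shared base client:

    import asyncio
    from azure.storage.filedatalake.aio import DataLakeDirectoryClient

    async def main():
        client = DataLakeDirectoryClient.from_connection_string(
            "<connection-string>", "my-file-system", "my-directory")  # assumed values
        async with client:
            await client.create_directory(metadata={"owner": "team-a"})
            props = await client.get_directory_properties()
            print(props.name, props.last_modified)
            await client.delete_directory()

    asyncio.run(main())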
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: DirectoryProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_directory_async.py
+ :start-after: [START get_directory_properties]
+ :end-before: [END get_directory_properties]
+ :language: python
+ :dedent: 4
+ :caption: Getting the properties for a file/directory.
+ """
+ return await self._get_path_properties(cls=deserialize_dir_properties, **kwargs) # pylint: disable=protected-access
+
+ async def rename_directory(self, new_name, # type: str
+ **kwargs):
+ # type: (**Any) -> DataLakeDirectoryClient
+ """
+ Rename the source directory.
+
+ :param str new_name:
+ The new directory name the user wants to rename to.
+ The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+ :keyword source_lease:
+ A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory_async.py + :start-after: [START rename_directory] + :end-before: [END rename_directory] + :language: python + :dedent: 4 + :caption: Rename the source directory. + """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') + new_path = new_path_and_token[0] + try: + new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') + except IndexError: + if not self._raw_credential and new_file_system != self.file_system_name: + raise ValueError("please provide the sas token for the new directory") + if not self._raw_credential and new_file_system == self.file_system_name: + new_dir_sas = self._query_str.strip('?') + + new_directory_client = DataLakeDirectoryClient( + "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, + credential=self._raw_credential or new_dir_sas, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + await new_directory_client._rename_path( # pylint: disable=protected-access + '/{}/{}{}'.format(quote(unquote(self.file_system_name)), + quote(unquote(self.path_name)), + self._query_str), + **kwargs) + return new_directory_client + + async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Create a subdirectory and return the subdirectory client to be interacted with. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword lease: + Required if the file has an active lease. 
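To illustrate the new-name format that rename_directory parses above ("{filesystem}/{path}", optionally suffixed with "?<sas>" when a separate token is needed), a short sketch with assumed values:

    import asyncio
    from azure.storage.filedatalake.aio import DataLakeDirectoryClient

    async def main():
        dir_client = DataLakeDirectoryClient.from_connection_string(
            "<connection-string>", "my-file-system", "olddir")  # assumed values
        async with dir_client:
            # The target must be prefixed with its file system name.
            renamed = await dir_client.rename_directory("my-file-system/newdir")
            print(renamed.path_name)

    asyncio.run(main())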
Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient for the subdirectory. + """ + subdir = self.get_sub_directory_client(sub_directory) + await subdir.create_directory(metadata=metadata, **kwargs) + return subdir + + async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Marks the specified subdirectory for deletion. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :keyword lease: + Required if the directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
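The subdirectory helpers above simply resolve a client via get_sub_directory_client and call create/delete on it; a brief sketch, assuming an already authenticated parent client:

    # Assumes `parent` is an authenticated aio DataLakeDirectoryClient.
    async def manage_child(parent):
        sub = await parent.create_sub_directory("child")  # returns the new subdirectory's client
        print(sub.path_name)                              # e.g. "parent/child"
        await parent.delete_sub_directory("child")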
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient for the subdirectory + """ + subdir = self.get_sub_directory_client(sub_directory) + await subdir.delete_directory(**kwargs) + return subdir + + async def create_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) -> DataLakeFileClient + """ + Create a new file and return the file client to be interacted with. + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. + :type file: str or ~azure.storage.filedatalake.FileProperties + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. 
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeFileClient
+ """
+ file_client = self.get_file_client(file)
+ await file_client.create_file(**kwargs)
+ return file_client
+
+ def get_file_client(self, file # type: Union[FileProperties, str]
+ ):
+ # type: (...) -> DataLakeFileClient
+ """Get a client to interact with the specified file.
+
+ The file need not already exist.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties. eg. directory/subdirectory/file
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :returns: A DataLakeFileClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/test_datalake_service_samples.py
+ :start-after: [START bsc_get_file_client]
+ :end-before: [END bsc_get_file_client]
+ :language: python
+ :dedent: 12
+ :caption: Getting the file client to interact with a specific file.
+ """
+ try:
+ file_path = file.name
+ except AttributeError:
+ file_path = self.path_name + '/' + file
+
+ return DataLakeFileClient(
+ self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ _location_mode=self._location_mode, require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str]
+ ):
+ # type: (...) -> DataLakeDirectoryClient
+ """Get a client to interact with the specified subdirectory of the current directory.
+
+ The subdirectory need not already exist.
+
+ :param sub_directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/test_datalake_service_samples.py
+ :start-after: [START bsc_get_directory_client]
+ :end-before: [END bsc_get_directory_client]
+ :language: python
+ :dedent: 12
+ :caption: Getting the directory client to interact with a specific directory.
+ """
+ try:
+ subdir_path = sub_directory.name
+ except AttributeError:
+ subdir_path = self.path_name + '/' + sub_directory
+
+ return DataLakeDirectoryClient(
+ self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ _location_mode=self._location_mode, require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_file_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_file_client_async.py
new file mode 100644
index 0000000..90fd7ca
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_file_client_async.py
@@ -0,0 +1,553 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+try:
+ from urllib.parse import quote, unquote
+except ImportError:
+ from urllib2 import quote, unquote # type: ignore
+
+from ._download_async import StorageStreamDownloader
+from ._path_client_async import PathClient
+from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase
+from .._serialize import convert_datetime_to_rfc1123
+from .._deserialize import process_storage_error, deserialize_file_properties
+from .._generated.models import StorageErrorException
+from .._models import FileProperties
+from ..aio._upload_helper import upload_datalake_file
+
+
+class DataLakeFileClient(PathClient, DataLakeFileClientBase):
+ """A client to interact with the DataLake file, even if the file may not yet exist.
+
+ :ivar str url:
+ The full endpoint URL to the file system, including SAS token if used.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URI to the storage account.
+ :param file_system_name:
+ The file system for the directory or files.
+ :type file_system_name: str
+ :param file_path:
+ The whole file path, used to interact with a specific file.
+ eg. "{directory}/{subdirectory}/{file}"
+ :type file_path: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py
+ :start-after: [START instantiate_file_client_from_conn_str]
+ :end-before: [END instantiate_file_client_from_conn_str]
+ :language: python
+ :dedent: 4
+ :caption: Creating the DataLakeFileClient from connection string.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ file_path, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...)
-> None + super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, + credential=credential, **kwargs) + + async def create_file(self, content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ + Create a new file. + + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: response dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START create_file] + :end-before: [END create_file] + :language: python + :dedent: 4 + :caption: Create file. + """ + return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) + + async def delete_file(self, **kwargs): + # type: (...) -> None + """ + Marks the specified file for deletion. + + :keyword lease: + Required if the file has an active lease. 
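A hedged end-to-end sketch for the async file client above (create, then clean up); the connection string and path are assumptions:

    import asyncio
    from azure.storage.filedatalake.aio import DataLakeFileClient

    async def main():
        file_client = DataLakeFileClient.from_connection_string(
            "<connection-string>", "my-file-system", "folder/data.txt")  # assumed values
        async with file_client:
            await file_client.create_file(permissions="0640", umask="0022")
            await file_client.delete_file()

    asyncio.run(main())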
Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START delete_file] + :end-before: [END delete_file] + :language: python + :dedent: 4 + :caption: Delete file. + """ + return await self._delete(**kwargs) + + async def get_file_properties(self, **kwargs): + # type: (**Any) -> FileProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file. It does not return the content of the file. + + :keyword lease: + Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: FileProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START get_file_properties] + :end-before: [END get_file_properties] + :language: python + :dedent: 4 + :caption: Getting the properties for a file. 
+ """ + return await self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access + + async def set_file_expiry(self, expiry_options, # type: str + expires_on=None, # type: Optional[Union[datetime, int]] + **kwargs): + # type: (str, Optional[Union[datetime, int]], **Any) -> None + """Sets the time a file will expire and be deleted. + + :param str expiry_options: + Required. Indicates mode of the expiry time. + Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' + :param datetime or int expires_on: + The time to set the file to expiry. + When expiry_options is RelativeTo*, expires_on should be an int in milliseconds + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + try: + expires_on = convert_datetime_to_rfc1123(expires_on) + except AttributeError: + expires_on = str(expires_on) + await self._datalake_client_for_blob_operation.path.set_expiry(expiry_options, expires_on=expires_on, + **kwargs) # pylint: disable=protected-access + + async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + overwrite=False, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Any] + """ + Upload data to a file. + + :param data: Content to be uploaded to file + :param int length: Size of the data in bytes. + :param bool overwrite: to overwrite an existing file or not. + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword metadata: + Name-value pairs associated with the blob as metadata. + :paramtype metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword int chunk_size:
+ The maximum chunk size for uploading a file in chunks.
+ Defaults to 100*1024*1024, or 100MB.
+ :return: response dict (Etag and last modified).
+ """
+ options = self._upload_options(
+ data,
+ length=length,
+ overwrite=overwrite,
+ **kwargs)
+ return await upload_datalake_file(**options)
+
+ async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+ offset, # type: int
+ length=None, # type: Optional[int]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """Append data to the file.
+
+ :param data: Content to be appended to the file.
+ :param offset: Start position in the file where the data is to be appended.
+ :param length: Size of the data in bytes.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the block content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https as https (the default)
+ will already validate. Note that this MD5 hash is not stored with the
+ file.
+ :keyword lease:
+ Required if the file has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :return: dict of the response header
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+ :start-after: [START append_data]
+ :end-before: [END append_data]
+ :language: python
+ :dedent: 4
+ :caption: Append data to the file.
+ """
+ options = self._append_data_options(
+ data,
+ offset,
+ length=length,
+ **kwargs)
+ try:
+ return await self._client.path.append_data(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ async def flush_data(self, offset, # type: int
+ retain_uncommitted_data=False, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """ Commit the previously appended data.
+
+ :param offset: offset is equal to the length of the file after committing the
+ previously appended data.
+ :param bool retain_uncommitted_data: Valid only for flush operations. If
+ "true", uncommitted data is retained after the flush operation
+ completes; otherwise, the uncommitted data is deleted after the flush
+ operation. The default is false. Data at offsets less than the
+ specified position are written to the file when flush succeeds, but
+ this optional parameter allows data after the flush position to be
+ retained for a future flush operation.
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword bool close: Azure Storage Events allow applications to receive
+ notifications when files change. When Azure Storage Events are
+ enabled, a file changed event is raised. This event has a property
+ indicating whether this is the final change to distinguish the
+ difference between an intermediate flush to a file stream and the
+ final close of a file stream. The close query parameter is valid only
+ when the action is "flush" and change notifications are enabled. If
+ the value of close is "true" and the flush operation completes
+ successfully, the service raises a file change notification with a
+ property indicating that this is the final update (the file stream has
+ been closed). If "false" a change notification is raised indicating
+ the file has changed. The default is false. This query parameter is
+ set to true by the Hadoop ABFS driver to indicate that the file stream
+ has been closed.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :return: response header in dict
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START upload_file_to_file_system]
+ :end-before: [END upload_file_to_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Commit the previously appended data.
+ """
+ options = self._flush_data_options(
+ offset,
+ retain_uncommitted_data=retain_uncommitted_data, **kwargs)
+ try:
+ return await self._client.path.flush_data(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ async def download_file(self, offset=None, length=None, **kwargs):
+ # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+ """Downloads a file to the StorageStreamDownloader. The readall() method must
+ be used to read all the content, or readinto() must be used to download the file into
+ a stream.
+
+ :param int offset:
+ Start of byte range to use for downloading a section of the file.
+ Must be set if length is provided.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword lease:
+ If specified, download only succeeds if the file's lease is active
+ and matches this ID. Required if the file has an active lease.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
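append_data and flush_data above implement the usual DataLake two-phase write: append at explicit offsets, then flush at the final length to commit; a minimal sketch, assuming an authenticated client for an existing file:

    # Assumes `file_client` is an authenticated aio DataLakeFileClient for an existing file.
    async def append_and_commit(file_client):
        data = b"hello, datalake"
        await file_client.append_data(data, offset=0, length=len(data))
        # Nothing is readable until flush commits everything up to the given position.
        await file_client.flush_data(len(data))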
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: A streaming object (StorageStreamDownloader)
+ :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+ :start-after: [START read_file]
+ :end-before: [END read_file]
+ :language: python
+ :dedent: 4
+ :caption: Return the downloaded data.
+ """
+ downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+ return StorageStreamDownloader(downloader)
+
+ async def rename_file(self, new_name, # type: str
+ **kwargs):
+ # type: (**Any) -> DataLakeFileClient
+ """
+ Rename the source file.
+
+ :param str new_name: The new file name the user wants to rename to.
+ The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword source_lease: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
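download_file above wraps the blob downloader in a filedatalake StorageStreamDownloader, so reading takes a second await; a sketch with an assumed, already authenticated client:

    # Assumes `file_client` is an authenticated aio DataLakeFileClient.
    async def read_whole_file(file_client):
        downloader = await file_client.download_file()
        return await downloader.readall()  # or: await downloader.readinto(stream)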
+ :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: the renamed file client + :rtype: DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START rename_file] + :end-before: [END rename_file] + :language: python + :dedent: 4 + :caption: Rename the source file. + """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') + new_path = new_path_and_token[0] + try: + new_file_sas = new_path_and_token[1] or self._query_str.strip('?') + except IndexError: + if not self._raw_credential and new_file_system != self.file_system_name: + raise ValueError("please provide the sas token for the new file") + if not self._raw_credential and new_file_system == self.file_system_name: + new_file_sas = self._query_str.strip('?') + + new_file_client = DataLakeFileClient( + "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, + credential=self._raw_credential or new_file_sas, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + await new_file_client._rename_path( # pylint: disable=protected-access + '/{}/{}{}'.format(quote(unquote(self.file_system_name)), + quote(unquote(self.path_name)), + self._query_str), + **kwargs) + return new_file_client diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_lease_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_lease_async.py new file mode 100644 index 0000000..836e45b --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_lease_async.py @@ -0,0 +1,243 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any,
+    TypeVar, TYPE_CHECKING
+)
+from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobLeaseClient
+from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase
+
+
+if TYPE_CHECKING:
+    FileSystemClient = TypeVar("FileSystemClient")
+    DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient")
+    DataLakeFileClient = TypeVar("DataLakeFileClient")
+
+
+class DataLakeLeaseClient(DataLakeLeaseClientBase):
+    """Creates a new DataLakeLeaseClient.
+
+    This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :ivar str id:
+        The ID of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired.
+    :ivar str etag:
+        The ETag of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired or modified.
+    :ivar ~datetime.datetime last_modified:
+        The last modified timestamp of the lease currently being maintained.
+        This will be `None` if no lease has yet been acquired or modified.
+
+    :param client:
+        The client of the file system, directory, or file to lease.
+    :type client: ~azure.storage.filedatalake.aio.FileSystemClient or
+        ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient
+    :param str lease_id:
+        A string representing the lease ID of an existing lease. This value does not
+        need to be specified in order to acquire a new lease, or break one.
+    """
+    def __init__(
+            self, client, lease_id=None
+    ):  # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+        # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
+        super(DataLakeLeaseClient, self).__init__(client, lease_id)
+
+        if hasattr(client, '_blob_client'):
+            _client = client._blob_client  # type: ignore # pylint: disable=protected-access
+        elif hasattr(client, '_container_client'):
+            _client = client._container_client  # type: ignore # pylint: disable=protected-access
+        else:
+            raise TypeError("Lease must use any of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.")
+
+        self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
+
+    def __enter__(self):
+        raise TypeError("Async lease must use 'async with'.")
+
+    def __exit__(self, *args):
+        self.release()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *args):
+        await self.release()
+
+    async def acquire(self, lease_duration=-1, **kwargs):
+        # type: (int, **Any) -> None
+        """Requests a new lease.
+
+        If the file/file system does not have an active lease, the DataLake service creates a
+        lease on the file/file system and returns a new lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) + self._update_lease_client_attributes() + + async def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the file system or file. Note that + the lease may be renewed even if it has expired as long as the file system + or file has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + await self._blob_lease_client.renew(**kwargs) + self._update_lease_client_attributes() + + async def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the file system or file. Releasing the lease allows another client + to immediately acquire the lease for the file system or file as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + await self._blob_lease_client.release(**kwargs) + self._update_lease_client_attributes() + + async def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) + self._update_lease_client_attributes() + + async def break_lease(self, lease_break_period=None, **kwargs): + # type: (Optional[int], Any) -> int + """Break the lease, if the file system or file has an active lease. + + Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the file system or file. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :param int lease_break_period: + This is the proposed duration of seconds that the lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the lease. If longer, the time remaining on the lease is used. + A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break + period. 
If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
+        """
+        return await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_service_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_service_client_async.py
new file mode 100644
index 0000000..9c9df14
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_service_client_async.py
@@ -0,0 +1,372 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+from azure.core.paging import ItemPaged
+
+from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobServiceClient
+from .._generated.aio import DataLakeStorageClient
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin
+from ._file_system_client_async import FileSystemClient
+from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase
+from .._shared.policies_async import ExponentialRetry
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._models import FileSystemPropertiesPaged
+from .._models import UserDelegationKey, LocationMode
+
+
+class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase):
+    """A client to interact with the DataLake Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete file systems within the account.
+    For operations relating to a specific file system, directory or file, clients for those entities
+    can also be retrieved using the `get_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the datalake service endpoint.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URL to the DataLake storage account. Any other entities included
+        in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
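A minimal construction sketch for this async service client; the account name, URL, and azure.identity credential below are placeholders, not values from this patch:

    import asyncio
    from datetime import datetime, timedelta
    from azure.identity.aio import DefaultAzureCredential
    # Import path assumes the vendored layout added by this patch.
    from azure.multiapi.storagev2.filedatalake.v2020_02_10.aio import DataLakeServiceClient

    async def main():
        async with DefaultAzureCredential() as credential:
            # "myaccount" is a placeholder storage account name.
            async with DataLakeServiceClient("https://myaccount.dfs.core.windows.net",
                                             credential=credential) as service:
                start = datetime.utcnow()
                key = await service.get_user_delegation_key(start, start + timedelta(hours=1))

    asyncio.run(main())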
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_service_async.py
+            :start-after: [START create_datalake_service_client]
+            :end-before: [END create_datalake_service_client]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeServiceClient from connection string.
+
+        .. literalinclude:: ../samples/datalake_samples_service_async.py
+            :start-after: [START create_datalake_service_client_oauth]
+            :end-before: [END create_datalake_service_client_oauth]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
+    """
+
+    def __init__(
+            self, account_url,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        super(DataLakeServiceClient, self).__init__(
+            account_url,
+            credential=credential,
+            **kwargs
+        )
+        self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs)
+        self._blob_service_client._hosts[LocationMode.SECONDARY] = ""  # pylint: disable=protected-access
+        self._client = DataLakeStorageClient(self.url, None, None, pipeline=self._pipeline)
+        self._loop = kwargs.get('loop', None)
+
+    async def __aexit__(self, *args):
+        await self._blob_service_client.close()
+        await super(DataLakeServiceClient, self).__aexit__(*args)
+
+    async def close(self):
+        # type: () -> None
+        """This method closes the sockets opened by the client.
+        It need not be used when the client is used as a context manager.
+        """
+        await self._blob_service_client.close()
+        await self.__aexit__()
+
+    async def get_user_delegation_key(self, key_start_time,  # type: datetime
+                                      key_expiry_time,  # type: datetime
+                                      **kwargs  # type: Any
+                                      ):
+        # type: (...) -> UserDelegationKey
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.filedatalake.UserDelegationKey
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START get_user_delegation_key]
+                :end-before: [END get_user_delegation_key]
+                :language: python
+                :dedent: 8
+                :caption: Get user delegation key from datalake service client.
+        """
+        delegation_key = await self._blob_service_client.get_user_delegation_key(
+            key_start_time=key_start_time,
+            key_expiry_time=key_expiry_time,
+            **kwargs)  # pylint: disable=protected-access
+        return UserDelegationKey._from_generated(delegation_key)  # pylint: disable=protected-access
+
+    def list_file_systems(self, name_starts_with=None,  # type: Optional[str]
+                          include_metadata=None,  # type: Optional[bool]
+                          **kwargs):
+        # type: (...) -> ItemPaged[FileSystemProperties]
+        """Returns a generator to list the file systems under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all file systems have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only file systems whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that file system metadata be returned in the response.
+            The default value is `False`.
+        :keyword int results_per_page:
+            The maximum number of file system names to retrieve per API
+            call. If the request does not specify a value, the server will return up to 5,000 items per page.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) of FileSystemProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START list_file_systems]
+                :end-before: [END list_file_systems]
+                :language: python
+                :dedent: 8
+                :caption: Listing the file systems in the datalake service.
+        """
+        item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with,
+                                                               include_metadata=include_metadata,
+                                                               **kwargs)  # pylint: disable=protected-access
+        item_paged._page_iterator_class = FileSystemPropertiesPaged  # pylint: disable=protected-access
+        return item_paged
+
+    async def create_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                                 metadata=None,  # type: Optional[Dict[str, str]]
+                                 public_access=None,  # type: Optional[PublicAccess]
+                                 **kwargs):
+        # type: (...) -> FileSystemClient
+        """Creates a new file system under the specified account.
+
+        If the file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created file system.
+
+        :param str file_system:
+            The name of the file system to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+            Possible values include: 'file system', 'file'.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START create_file_system_from_service_client]
+                :end-before: [END create_file_system_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Creating a file system in the datalake service.
+        """
+        file_system_client = self.get_file_system_client(file_system)
+        await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs)
+        return file_system_client
+
+    async def delete_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                                 **kwargs):
+        # type: (...) -> FileSystemClient
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :param file_system:
+            The file system to delete. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
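A short usage sketch for create_file_system and list_file_systems above, assuming an async DataLakeServiceClient instance named service; the underlying blob client pages asynchronously, so iteration uses async for (the names are illustrative):

    # Create a file system, then enumerate those sharing its prefix.
    fs_client = await service.create_file_system("my-filesystem")
    async for fs in service.list_file_systems(name_starts_with="my-"):
        print(fs.name, fs.last_modified)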
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :keyword lease: + If specified, delete_file_system only succeeds if the + file system's lease is active and matches this ID. + Required if the file system has an active lease. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START delete_file_system_from_service_client] + :end-before: [END delete_file_system_from_service_client] + :language: python + :dedent: 8 + :caption: Deleting a file system in the datalake service. + """ + file_system_client = self.get_file_system_client(file_system) + await file_system_client.delete_file_system(**kwargs) + return file_system_client + + def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] + ): + # type: (...) -> FileSystemClient + """Get a client to interact with the specified file system. + + The file system need not already exist. + + :param file_system: + The file system. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :returns: A FileSystemClient. + :rtype: ~azure.storage.filedatalake.aio.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Getting the file system client to interact with a specific file system. + """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + + return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, + _configuration=self._config, + _pipeline=self._pipeline, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + + def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] + directory # type: Union[DirectoryProperties, str] + ): + # type: (...) -> DataLakeDirectoryClient + """Get a client to interact with the specified directory. + + The directory need not already exist. 
+
+        :param file_system:
+            The file system that the directory is in. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START get_directory_client_from_service_client]
+                :end-before: [END get_directory_client_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+        try:
+            directory_name = directory.name
+        except AttributeError:
+            directory_name = directory
+        return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       _configuration=self._config, _pipeline=self._pipeline,
+                                       _hosts=self._hosts,
+                                       require_encryption=self.require_encryption,
+                                       key_encryption_key=self.key_encryption_key,
+                                       key_resolver_function=self.key_resolver_function
+                                       )
+
+    def get_file_client(self, file_system,  # type: Union[FileSystemProperties, str]
+                        file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_system:
+            The file system that the file is in. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :param file_path:
+            The file with which to interact. This can either be the full path of the file
+            (from the root directory), e.g. directory/subdirectory/file, or an instance of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START get_file_client_from_service_client]
+                :end-before: [END get_file_client_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file client to interact with a specific file.
+ """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + try: + file_path = file_path.name + except AttributeError: + pass + + return DataLakeFileClient( + self.url, file_system_name, file_path=file_path, credential=self._raw_credential, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_download_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_download_async.py new file mode 100644 index 0000000..ea27438 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_download_async.py @@ -0,0 +1,52 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from .._deserialize import from_blob_properties + + +class StorageStreamDownloader(object): + """A streaming object to download from Azure Storage. + + :ivar str name: + The name of the file being downloaded. + :ivar ~azure.storage.filedatalake.FileProperties properties: + The properties of the file being downloaded. If only a range of the data is being + downloaded, this will be reflected in the properties. + :ivar int size: + The size of the total data in the stream. This will be the byte range if speficied, + otherwise the total size of the file. + """ + + def __init__(self, downloader): + self._downloader = downloader + self.name = self._downloader.name + self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access + self.size = self._downloader.size + + def __len__(self): + return self.size + + def chunks(self): + return self._downloader.chunks() + + async def readall(self): + """Download the contents of this file. + + This operation is blocking until all data is downloaded. + :rtype: bytes or str + """ + return await self._downloader.readall() + + async def readinto(self, stream): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + return await self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_file_system_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_file_system_client_async.py new file mode 100644 index 0000000..15409d5 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_file_system_client_async.py @@ -0,0 +1,745 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import functools
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Dict, TYPE_CHECKING
+)
+
+from azure.core.tracing.decorator import distributed_trace
+
+from azure.core.async_paging import AsyncItemPaged
+
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.multiapi.storagev2.blob.v2019_07_07.aio import ContainerClient
+
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._models import PathPropertiesPaged
+from ._data_lake_lease_async import DataLakeLeaseClient
+from .._file_system_client import FileSystemClient as FileSystemClientBase
+from .._generated.aio import DataLakeStorageClient
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin
+from .._shared.policies_async import ExponentialRetry
+from .._models import FileSystemProperties, PublicAccess
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from .._models import (  # pylint: disable=unused-import
+        ContentSettings)
+
+
+class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase):
+    """A client to interact with a specific file system, even if that file system
+    may not yet exist.
+
+    For operations relating to a specific directory or file within this file system, a directory client or file client
+    can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the URL already has a SAS token, specifying an explicit credential will take priority.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+            :start-after: [START create_file_system_client_from_service]
+            :end-before: [END create_file_system_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a FileSystemClient from an existing DataLakeServiceClient.
+    """
+
+    def __init__(
+            self, account_url,  # type: str
+            file_system_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        super(FileSystemClient, self).__init__(
+            account_url,
+            file_system_name=file_system_name,
+            credential=credential,
+            **kwargs)
+        # to override the class field _container_client sync version
+        kwargs.pop('_hosts', None)
+        self._container_client = ContainerClient(self._blob_account_url, file_system_name,
+                                                 credential=credential,
+                                                 _hosts=self._container_client._hosts,  # pylint: disable=protected-access
+                                                 **kwargs)  # type: ignore # pylint: disable=protected-access
+        self._client = DataLakeStorageClient(self.url, file_system_name, None, pipeline=self._pipeline)
+        self._loop = kwargs.get('loop', None)
+
+    async def __aexit__(self, *args):
+        await self._container_client.close()
+        await super(FileSystemClient, self).__aexit__(*args)
+
+    async def close(self):
+        # type: () -> None
+        """This method closes the sockets opened by the client.
+        It need not be used when the client is used as a context manager.
+        """
+        await self._container_client.close()
+        await self.__aexit__()
+
+    @distributed_trace_async
+    async def acquire_lease(
+            self, lease_duration=-1,  # type: int
+            lease_id=None,  # type: Optional[str]
+            **kwargs
+    ):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file system does not have an active lease,
+        the DataLake service creates a lease on the file system and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A DataLakeLeaseClient object, that can be run in a context manager.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START acquire_lease_on_file_system]
+                :end-before: [END acquire_lease_on_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Acquiring a lease on the file_system.
+ """ + lease = DataLakeLeaseClient(self, lease_id=lease_id) + await lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + @distributed_trace_async + async def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[PublicAccess] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """Creates a new file system under the specified account. + + If the file system with the same name already exists, a ResourceExistsError will + be raised. This method returns a client with which to interact with the newly + created file system. + + :param metadata: + A dict with name-value pairs to associate with the + file system as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + To specify whether data in the file system may be accessed publicly and the level of access. + :type public_access: ~azure.storage.filedatalake.PublicAccess + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.filedatalake.aio.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_file_system] + :end-before: [END create_file_system] + :language: python + :dedent: 16 + :caption: Creating a file system in the datalake service. + """ + return await self._container_client.create_container(metadata=metadata, + public_access=public_access, + **kwargs) + + @distributed_trace_async + async def delete_file_system(self, **kwargs): + # type: (Any) -> None + """Marks the specified file system for deletion. + + The file system and any files contained within it are later deleted during garbage collection. + If the file system is not found, a ResourceNotFoundError will be raised. + + :keyword lease: + If specified, delete_file_system only succeeds if the + file system's lease is active and matches this ID. + Required if the file system has an active lease. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START delete_file_system] + :end-before: [END delete_file_system] + :language: python + :dedent: 16 + :caption: Deleting a file system in the datalake service. 
+ """ + await self._container_client.delete_container(**kwargs) + + @distributed_trace_async + async def get_file_system_properties(self, **kwargs): + # type: (Any) -> FileSystemProperties + """Returns all user-defined metadata and system properties for the specified + file system. The data returned does not include the file system's list of paths. + + :keyword lease: + If specified, get_file_system_properties only succeeds if the + file system's lease is active and matches this ID. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Properties for the specified file system within a file system object. + :rtype: ~azure.storage.filedatalake.FileSystemProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START get_file_system_properties] + :end-before: [END get_file_system_properties] + :language: python + :dedent: 16 + :caption: Getting properties on the file system. + """ + container_properties = await self._container_client.get_container_properties(**kwargs) + return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access + + @distributed_trace_async + async def set_file_system_metadata( # type: ignore + self, metadata, # type: Dict[str, str] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Sets one or more user-defined name-value pairs for the specified + file system. Each call to this operation replaces all existing metadata + attached to the file system. To remove all metadata from the file system, + call this operation with no metadata dict. + + :param metadata: + A dict containing name-value pairs to associate with the file system as + metadata. Example: {'category':'test'} + :type metadata: dict[str, str] + :keyword lease: + If specified, set_file_system_metadata only succeeds if the + file system's lease is active and matches this ID. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: file system-updated property dict (Etag and last modified). + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START set_file_system_metadata]
+                :end-before: [END set_file_system_metadata]
+                :language: python
+                :dedent: 16
+                :caption: Setting metadata on the file system.
+        """
+        return await self._container_client.set_container_metadata(metadata=metadata, **kwargs)
+
+    @distributed_trace_async
+    async def set_file_system_access_policy(
+            self, signed_identifiers,  # type: Dict[str, AccessPolicy]
+            public_access=None,  # type: Optional[Union[str, PublicAccess]]
+            **kwargs
+    ):  # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets the permissions for the specified file system or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a file system may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the file system. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy]
+        :param ~azure.storage.filedatalake.PublicAccess public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :keyword lease:
+            Required if the file system has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: filesystem-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+        """
+        return await self._container_client.set_container_access_policy(signed_identifiers,
+                                                                        public_access=public_access, **kwargs)
+
+    @distributed_trace_async
+    async def get_file_system_access_policy(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the permissions for the specified file system.
+        The permissions indicate whether file system data may be accessed publicly.
+
+        :keyword lease:
+            If specified, get_file_system_access_policy only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
+        """
+        access_policy = await self._container_client.get_container_access_policy(**kwargs)
+        return {
+            'public_access': PublicAccess._from_generated(access_policy['public_access']),  # pylint: disable=protected-access
+            'signed_identifiers': access_policy['signed_identifiers']
+        }
+
+    @distributed_trace
+    def get_paths(self, path=None,  # type: Optional[str]
+                  recursive=True,  # type: Optional[bool]
+                  max_results=None,  # type: Optional[int]
+                  **kwargs):
+        # type: (...) -> AsyncItemPaged[PathProperties]
+        """Returns a generator to list the paths (files or directories) under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str path:
+            Filters the results to return only paths under the specified path.
+        :param bool recursive:
+            Optional. Set True (the default) to list paths recursively.
+        :param int max_results:
+            An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword upn:
+            Optional. Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names. If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.PathProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_paths_in_file_system]
+                :end-before: [END get_paths_in_file_system]
+                :language: python
+                :dedent: 12
+                :caption: List the paths in the file system.
+        """
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.file_system.list_paths,
+            path=path,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, recursive, path=path, max_results=max_results,
+            page_iterator_class=PathPropertiesPaged, **kwargs)
+
+    @distributed_trace_async
+    async def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                               metadata=None,  # type: Optional[Dict[str, str]]
+                               **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a new directory and return a client to interact with it.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
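Because get_paths above returns an AsyncItemPaged, it is consumed with async for; a sketch, assuming a FileSystemClient named fs_client (the directory name is illustrative):

    # Recursively list everything under a directory.
    async for item in fs_client.get_paths(path="my-dir", recursive=True):
        print(item.name, item.is_directory)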
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_directory_from_file_system] + :end-before: [END create_directory_from_file_system] + :language: python + :dedent: 12 + :caption: Create directory in the file system. + """ + directory_client = self.get_directory_client(directory) + await directory_client.create_directory(metadata=metadata, **kwargs) + return directory_client + + @distributed_trace_async + async def delete_directory(self, directory, # type: Union[DirectoryProperties, str] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Marks the specified path for deletion. + + :param directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START delete_directory_from_file_system] + :end-before: [END delete_directory_from_file_system] + :language: python + :dedent: 12 + :caption: Delete directory in the file system. + """ + directory_client = self.get_directory_client(directory) + await directory_client.delete_directory(**kwargs) + return directory_client + + @distributed_trace_async + async def create_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) -> DataLakeFileClient + """ + Create file + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. + :type file: str or ~azure.storage.filedatalake.FileProperties + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ @distributed_trace_async
+ async def delete_file(self, file, # type: Union[FileProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeFileClient
+ """
+ Marks the specified file for deletion.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties.
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :keyword lease:
+ Required if the file has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START delete_file_from_file_system]
+ :end-before: [END delete_file_from_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Delete file in the file system.
+ """
+ file_client = self.get_file_client(file)
+ await file_client.delete_file(**kwargs)
+ return file_client
+
+ def _get_root_directory_client(self):
+ # type: () -> DataLakeDirectoryClient
+ """Get a client to interact with the root directory.
+
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+ """
+ return self.get_directory_client('/')
+
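+ # Illustrative sketch (an assumption, not from the samples folder): the
+ # etag/match_condition keywords documented above can make a delete conditional,
+ # e.g. only delete the file if it has not changed since it was last read.
+ # Assumes an authenticated async FileSystemClient named `file_system_client`.
+ #
+ # from azure.core import MatchConditions
+ # props = await file_system_client.get_file_client("a/b.txt").get_file_properties()
+ # await file_system_client.delete_file(
+ # "a/b.txt", etag=props.etag, match_condition=MatchConditions.IfNotModified)
+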
+ def get_directory_client(self, directory # type: Union[DirectoryProperties, str]
+ ):
+ # type: (...) -> DataLakeDirectoryClient
+ """Get a client to interact with the specified directory.
+
+ The directory need not already exist.
+
+ :param directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START get_directory_client_from_file_system]
+ :end-before: [END get_directory_client_from_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Getting the directory client to interact with a specific directory.
+ """
+ try:
+ directory_name = directory.name
+ except AttributeError:
+ directory_name = directory
+
+ return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name,
+ credential=self._raw_credential,
+ _configuration=self._config, _pipeline=self._pipeline,
+ _hosts=self._hosts,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function,
+ loop=self._loop
+ )
+
+ def get_file_client(self, file_path # type: Union[FileProperties, str]
+ ):
+ # type: (...) -> DataLakeFileClient
+ """Get a client to interact with the specified file.
+
+ The file need not already exist.
+
+ :param file_path:
+ The file with which to interact. This can either be the path of the file
+ (from the root directory), e.g. 'directory/subdirectory/file', or an
+ instance of FileProperties.
+ :type file_path: str or ~azure.storage.filedatalake.FileProperties
+ :returns: A DataLakeFileClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START get_file_client_from_file_system]
+ :end-before: [END get_file_client_from_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Getting the file client to interact with a specific file.
+ """
+ try:
+ file_path = file_path.name
+ except AttributeError:
+ pass
+
+ return DataLakeFileClient(
+ self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function, loop=self._loop)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_models.py
new file mode 100644
index 0000000..ba143f6
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_models.py
@@ -0,0 +1,110 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+from azure.core.async_paging import AsyncPageIterator
+from azure.multiapi.storagev2.blob.v2019_07_07.aio._models import ContainerPropertiesPaged
+
+from .._deserialize import return_headers_and_deserialized_path_list, process_storage_error
+from .._generated.models import StorageErrorException, Path
+from .._models import FileSystemProperties, PathProperties
+
+
+class FileSystemPropertiesPaged(ContainerPropertiesPaged):
+ """An Iterable of File System properties.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A file system name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str prefix: Filters the results to return only file systems whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of file system names to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(FileSystemPropertiesPaged, self).__init__(
+ *args,
+ **kwargs
+ )
+
+ @staticmethod
+ def _build_item(item):
+ return FileSystemProperties._from_generated(item) # pylint: disable=protected-access
+
+
+class PathPropertiesPaged(AsyncPageIterator):
+ """An Iterable of Path properties.
+
+ :ivar str path: Filters the results to return only paths under the specified path.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results.
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str path: Filters the results to return only paths under the specified path.
+ :param int max_results: The maximum number of paths to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
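+
+ Illustrative example (an assumption, not taken from the samples folder):
+ instances of this pager are typically produced by FileSystemClient.get_paths,
+ e.g.:
+
+ .. code-block:: python
+
+ async for path in file_system_client.get_paths(path="my-dir", recursive=True):
+ print(path.name, path.is_directory)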
+ """ + + def __init__( + self, command, + recursive, + path=None, + max_results=None, + continuation_token=None, + upn=None): + super(PathPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.recursive = recursive + self.results_per_page = max_results + self.path = path + self.upn = upn + self.current_page = None + self.path_list = None + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + self.recursive, + continuation=continuation_token or None, + path=self.path, + max_results=self.results_per_page, + upn=self.upn, + cls=return_headers_and_deserialized_path_list) + except StorageErrorException as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.path_list, self._response = get_next_return + self.current_page = [self._build_item(item) for item in self.path_list] + + return self._response['continuation'] or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, PathProperties): + return item + if isinstance(item, Path): + path = PathProperties._from_generated(item) # pylint: disable=protected-access + return path + return item diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_path_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_path_client_async.py new file mode 100644 index 0000000..509c0fd --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_path_client_async.py @@ -0,0 +1,703 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +from azure.core.exceptions import AzureError +from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobClient +from .._shared.base_client_async import AsyncStorageAccountHostsMixin +from .._path_client import PathClient as PathClientBase +from .._models import DirectoryProperties, AccessControlChangeResult, AccessControlChangeFailure, \ + AccessControlChangeCounters, AccessControlChanges, DataLakeAclChangeFailedError +from .._generated.aio import DataLakeStorageClient +from ._data_lake_lease_async import DataLakeLeaseClient +from .._generated.models import StorageErrorException +from .._deserialize import process_storage_error +from .._shared.policies_async import ExponentialRetry + +_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( + 'The require_encryption flag is set, but encryption is not supported' + ' for this method.') + + +class PathClient(AsyncStorageAccountHostsMixin, PathClientBase): + def __init__( + self, account_url, # type: str + file_system_name, # type: str + path_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None
+ kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+
+ super(PathClient, self).__init__(account_url, # pylint: disable=specify-parameter-names-in-call
+ file_system_name, path_name,
+ credential=credential,
+ **kwargs) # type: ignore
+
+ kwargs.pop('_hosts', None)
+
+ self._blob_client = BlobClient(account_url=self._blob_account_url, container_name=file_system_name,
+ blob_name=path_name,
+ credential=credential,
+ _hosts=self._blob_client._hosts, # pylint: disable=protected-access
+ **kwargs)
+
+ self._client = DataLakeStorageClient(self.url, file_system_name, path_name, pipeline=self._pipeline)
+ self._datalake_client_for_blob_operation = DataLakeStorageClient(self._blob_client.url,
+ file_system_name, path_name,
+ pipeline=self._pipeline)
+ self._loop = kwargs.get('loop', None)
+
+ async def __aexit__(self, *args):
+ await self._blob_client.close()
+ await super(PathClient, self).__aexit__(*args)
+
+ async def close(self):
+ # type: () -> None
+ """This method closes the sockets opened by the client.
+ It need not be called when the client is used as a context manager.
+ """
+ await self._blob_client.close()
+ await self.__aexit__()
+
+ async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Create a directory or file.
+
+ :param resource_type:
+ Required for Create File and Create Directory.
+ The value must be "file" or "directory". Possible values include:
+ 'directory', 'file'
+ :type resource_type: str
+ :param ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :param metadata:
+ Name-value pairs associated with the file/directory as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :type permissions: str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Dict[str, Union[str, datetime]]
+ """
+ options = self._create_path_options(
+ resource_type,
+ content_settings=content_settings,
+ metadata=metadata,
+ **kwargs)
+ try:
+ return await self._client.path.create(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ async def _delete(self, **kwargs):
+ # type: (**Any) -> None
+ """
+ Marks the specified path for deletion.
+
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ options = self._delete_path_options(**kwargs)
+ try:
+ return await self._client.path.delete(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ async def set_access_control(self, owner=None, # type: Optional[str]
+ group=None, # type: Optional[str]
+ permissions=None, # type: Optional[str]
+ acl=None, # type: Optional[str]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Set the owner, group, permissions, or access control list for a path.
+
+ :param owner:
+ Optional. The owner of the file or directory.
+ :type owner: str
+ :param group:
+ Optional. The owning group of the file or directory.
+ :type group: str
+ :param permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ permissions and acl are mutually exclusive.
+ :type permissions: str
+ :param acl:
+ Sets POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ permissions and acl are mutually exclusive.
+ :type acl: str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: response dict (Etag and last modified).
+ """
+ options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
+ try:
+ return await self._client.path.set_access_control(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
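+ # Illustrative sketch (an assumption: `directory_client` is an authenticated
+ # async DataLakeDirectoryClient, which inherits these methods from PathClient):
+ #
+ # await directory_client.set_access_control(permissions="rwxr-x---")
+ # access = await directory_client.get_access_control()
+ # print(access['permissions'], access['acl'])
+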
+ async def get_access_control(self, upn=None, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """
+ Get the owner, group, permissions, or access control list for a path.
+
+ :param upn:
+ Optional. Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the user identity values returned
+ in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false. Note that group and
+ application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: response dict.
+ """
+ options = self._get_access_control_options(upn=upn, **kwargs)
+ try:
+ return await self._client.path.get_properties(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ async def set_access_control_recursive(self,
+ acl,
+ **kwargs):
+ # type: (str, **Any) -> AccessControlChangeResult
+ """
+ Sets the Access Control on a path and sub-paths.
+
+ :param acl:
+ Sets POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation:
+ Optional continuation token that can be used to resume previously stopped operation.
+ :keyword int batch_size:
+ Optional. If data set size exceeds batch size then operation will be split into multiple
+ requests so that progress can be tracked. Batch size should be between 1 and 2000.
+ The default when unspecified is 2000.
+ :keyword int max_batches:
+ Optional. Defines the maximum number of batches that a single change Access Control
+ operation can execute.
+ If the maximum is reached before all sub-paths are processed,
+ then the continuation token can be used to resume the operation.
+ An empty value indicates that the maximum number of batches is unbounded and the
+ operation continues until the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+ The continuation token will only be returned when continue_on_failure is True in case of user errors.
+ If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+ :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs)
+ return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
+ async def update_access_control_recursive(self, acl, **kwargs):
+ # type: (str, **Any) -> AccessControlChangeResult
+ """
+ Modifies the Access Control on a path and sub-paths.
+
+ :param acl:
+ Modifies POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation:
+ Optional continuation token that can be used to resume previously stopped operation.
+ :keyword int batch_size:
+ Optional. If data set size exceeds batch size then operation will be split into multiple
+ requests so that progress can be tracked. Batch size should be between 1 and 2000.
+ The default when unspecified is 2000.
+ :keyword int max_batches:
+ Optional. Defines the maximum number of batches that a single change Access Control
+ operation can execute.
+ If the maximum is reached before all sub-paths are processed,
+ then the continuation token can be used to resume the operation.
+ An empty value indicates that the maximum number of batches is unbounded and the
+ operation continues until the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+ The continuation token will only be returned when continue_on_failure is True in case of user errors.
+ If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+ :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs)
+ return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
+ async def remove_access_control_recursive(self,
+ acl,
+ **kwargs):
+ # type: (str, **Any) -> AccessControlChangeResult
+ """
+ Removes the Access Control on a path and sub-paths.
+
+ :param acl:
+ Removes POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, and a user or
+ group identifier in the format "[scope:][type]:[id]".
+ :type acl: str
+ :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation:
+ Optional continuation token that can be used to resume previously stopped operation.
+ :keyword int batch_size:
+ Optional. If data set size exceeds batch size then operation will be split into multiple
+ requests so that progress can be tracked. Batch size should be between 1 and 2000.
+ The default when unspecified is 2000.
+ :keyword int max_batches:
+ Optional. Defines the maximum number of batches that a single change Access Control
+ operation can execute.
+ If the maximum is reached before all sub-paths are processed,
+ then the continuation token can be used to resume the operation.
+ An empty value indicates that the maximum number of batches is unbounded and the
+ operation continues until the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+ The continuation token will only be returned when continue_on_failure is True in case of user errors.
+ If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+ :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs)
+ return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
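+ # Illustrative sketch of the recursive ACL methods above (an assumption:
+ # `directory_client` is an authenticated async DataLakeDirectoryClient).
+ # The progress hook receives one AccessControlChanges object per batch:
+ #
+ # async def report(changes):
+ # print("batch failures:", len(changes.batch_failures))
+ #
+ # result = await directory_client.set_access_control_recursive(
+ # acl="user::rwx,group::r-x,other::---",
+ # progress_hook=report,
+ # batch_size=2000)
+ # print(result.counters.files_successful, result.counters.failure_count)
+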
+ async def _set_access_control_internal(self, options, progress_hook, max_batches=None):
+ try:
+ continue_on_failure = options.get('force_flag')
+ total_directories_successful = 0
+ total_files_success = 0
+ total_failure_count = 0
+ batch_count = 0
+ last_continuation_token = None
+ current_continuation_token = None
+ continue_operation = True
+ while continue_operation:
+ headers, resp = await self._client.path.set_access_control_recursive(**options)
+
+ # make a running tally so that we can report the final results
+ total_directories_successful += resp.directories_successful
+ total_files_success += resp.files_successful
+ total_failure_count += resp.failure_count
+ batch_count += 1
+ current_continuation_token = headers['continuation']
+
+ if current_continuation_token is not None:
+ last_continuation_token = current_continuation_token
+
+ if progress_hook is not None:
+ await progress_hook(AccessControlChanges(
+ batch_counters=AccessControlChangeCounters(
+ directories_successful=resp.directories_successful,
+ files_successful=resp.files_successful,
+ failure_count=resp.failure_count,
+ ),
+ aggregate_counters=AccessControlChangeCounters(
+ directories_successful=total_directories_successful,
+ files_successful=total_files_success,
+ failure_count=total_failure_count,
+ ),
+ batch_failures=[AccessControlChangeFailure(
+ name=failure.name,
+ is_directory=failure.type == 'DIRECTORY',
+ error_message=failure.error_message) for failure in resp.failed_entries],
+ continuation=last_continuation_token))
+
+ # update the continuation token, if there are more operations that cannot be completed in a single call
+ max_batches_satisfied = (max_batches is not None and batch_count == max_batches)
+ continue_operation = bool(current_continuation_token) and not max_batches_satisfied
+ options['continuation'] = current_continuation_token
+
+ # currently the service stops on any failure, so we should send back the last continuation token
+ # for the user to retry the failed updates
+ # otherwise we should just return what the service gave us
+ return AccessControlChangeResult(counters=AccessControlChangeCounters(
+ directories_successful=total_directories_successful,
+ files_successful=total_files_success,
+ failure_count=total_failure_count),
+ continuation=last_continuation_token
+ if total_failure_count > 0 and not continue_on_failure else current_continuation_token)
+ except AzureError as error:
+ raise DataLakeAclChangeFailedError(error, error.message, last_continuation_token)
+
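+ # Illustrative sketch: resuming a stopped recursive ACL change using the
+ # continuation token carried by DataLakeAclChangeFailedError (assumptions:
+ # `directory_client` and `new_acl` are defined by the caller, and the error
+ # exposes the token as `continuation`, matching the constructor call above).
+ #
+ # try:
+ # await directory_client.update_access_control_recursive(acl=new_acl)
+ # except DataLakeAclChangeFailedError as err:
+ # if err.continuation:
+ # await directory_client.update_access_control_recursive(
+ # acl=new_acl, continuation=err.continuation)
+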
+ async def _rename_path(self, rename_source,
+ **kwargs):
+ # type: (**Any) -> Dict[str, Any]
+ """
+ Rename a directory or file.
+
+ :param rename_source: The value must have the following format: "/{filesystem}/{path}".
+ :type rename_source: str
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword source_lease: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ """
+ options = self._rename_path_options(
+ rename_source,
+ **kwargs)
+ try:
+ return await self._client.path.create(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ async def _get_path_properties(self, **kwargs):
+ # type: (**Any) -> Union[FileProperties, DirectoryProperties]
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the file or directory. It does not return the content of the directory or file.
+
+ :keyword lease:
+ Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: DirectoryProperties or FileProperties
+ """
+ path_properties = await self._blob_client.get_blob_properties(**kwargs)
+ return path_properties
+
+ async def set_metadata(self, metadata, # type: Dict[str, str]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets one or more user-defined name-value pairs for the specified
+ file or directory. Each call to this operation replaces all existing metadata
+ attached to the file or directory. To remove all metadata from the file or directory,
+ call this operation with an empty metadata dict.
+
+ :param metadata:
+ A dict containing name-value pairs to associate with the file or directory as
+ metadata. Example: {'category':'test'}
+ :type metadata: dict[str, str]
+ :keyword lease:
+ If specified, the operation only succeeds if the
+ file or directory's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: file/directory-updated property dict (Etag and last modified).
+ """
+ return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
+
+ async def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """Sets system properties on the file or directory.
+
+ If one property is set in the content_settings, all properties will be overridden.
+
+ :param ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set file/directory properties.
+ :keyword lease:
+ If specified, the operation only succeeds if the
+ file or directory's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: file/directory-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
+ """
+ return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
+
+ async def acquire_lease(self, lease_duration=-1, # type: Optional[int]
+ lease_id=None, # type: Optional[str]
+ **kwargs):
+ # type: (...) -> DataLakeLeaseClient
+ """
+ Requests a new lease. If the file or directory does not have an active lease,
+ the DataLake service creates a lease on the file/directory and returns a new
+ lease ID.
+ + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A DataLakeLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/test_file_system_samples.py + :start-after: [START acquire_lease_on_file_system] + :end-before: [END acquire_lease_on_file_system] + :language: python + :dedent: 8 + :caption: Acquiring a lease on the file_system. + """ + lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore + await lease.acquire(lease_duration=lease_duration, **kwargs) + return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_upload_helper.py new file mode 100644 index 0000000..93da7bf --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_upload_helper.py @@ -0,0 +1,87 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from .._deserialize import (
+ process_storage_error)
+from .._generated.models import (
+ StorageErrorException,
+)
+from .._shared.response_handlers import return_response_headers
+from .._shared.uploads_async import (
+ upload_data_chunks,
+ DataLakeFileChunkUploader)
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument
+ return any([
+ modified_access_conditions.if_modified_since,
+ modified_access_conditions.if_unmodified_since,
+ modified_access_conditions.if_none_match,
+ modified_access_conditions.if_match
+ ])
+
+
+async def upload_datalake_file( # pylint: disable=unused-argument
+ client=None,
+ stream=None,
+ length=None,
+ overwrite=None,
+ validate_content=None,
+ max_concurrency=None,
+ **kwargs):
+ try:
+ if length == 0:
+ return {}
+ properties = kwargs.pop('properties', None)
+ umask = kwargs.pop('umask', None)
+ permissions = kwargs.pop('permissions', None)
+ path_http_headers = kwargs.pop('path_http_headers', None)
+ modified_access_conditions = kwargs.pop('modified_access_conditions', None)
+ chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
+
+ if not overwrite:
+ # if the caller did not specify any access conditions, require that no
+ # file matches, so data is never flushed to an already existing file
+ if not _any_conditions(modified_access_conditions):
+ modified_access_conditions.if_none_match = '*'
+ if properties or umask or permissions:
+ raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
+
+ if overwrite:
+ response = await client.create(
+ resource='file',
+ path_http_headers=path_http_headers,
+ properties=properties,
+ modified_access_conditions=modified_access_conditions,
+ umask=umask,
+ permissions=permissions,
+ cls=return_response_headers,
+ **kwargs)
+
+ # these modified_access_conditions are applied to flush_data to make sure
+ # that no other flush happens between the create call and the current flush
+ modified_access_conditions.if_match = response['etag']
+ modified_access_conditions.if_none_match = None
+ modified_access_conditions.if_modified_since = None
+ modified_access_conditions.if_unmodified_since = None
+
+ await upload_data_chunks(
+ service=client,
+ uploader_class=DataLakeFileChunkUploader,
+ total_size=length,
+ chunk_size=chunk_size,
+ stream=stream,
+ max_concurrency=max_concurrency,
+ validate_content=validate_content,
+ **kwargs)
+
+ return await client.flush_data(position=length,
+ path_http_headers=path_http_headers,
+ modified_access_conditions=modified_access_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/__init__.py
new file mode 100644
index 0000000..7c838c1
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/__init__.py
@@ -0,0 +1,74 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- + +from ._version import VERSION +from ._file_client import ShareFileClient +from ._directory_client import ShareDirectoryClient +from ._share_client import ShareClient +from ._share_service_client import ShareServiceClient +from ._lease import ShareLeaseClient +from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas +from ._shared.policies import ExponentialRetry, LinearRetry +from ._shared.models import ( + LocationMode, + ResourceTypes, + AccountSasPermissions, + StorageErrorCode) +from ._models import ( + ShareProperties, + DirectoryProperties, + Handle, + FileProperties, + Metrics, + RetentionPolicy, + CorsRule, + ShareSmbSettings, + SmbMultichannel, + ShareProtocolSettings, + AccessPolicy, + FileSasPermissions, + ShareSasPermissions, + ContentSettings, + NTFSAttributes) +from ._generated.models import ( + HandleItem +) + +__version__ = VERSION + + +__all__ = [ + 'ShareFileClient', + 'ShareDirectoryClient', + 'ShareClient', + 'ShareServiceClient', + 'ShareLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'LocationMode', + 'ResourceTypes', + 'AccountSasPermissions', + 'StorageErrorCode', + 'Metrics', + 'RetentionPolicy', + 'CorsRule', + 'ShareSmbSettings', + 'SmbMultichannel', + 'ShareProtocolSettings', + 'AccessPolicy', + 'FileSasPermissions', + 'ShareSasPermissions', + 'ShareProperties', + 'DirectoryProperties', + 'FileProperties', + 'ContentSettings', + 'Handle', + 'NTFSAttributes', + 'HandleItem', + 'generate_account_sas', + 'generate_share_sas', + 'generate_file_sas' +] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_deserialize.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_deserialize.py new file mode 100644 index 0000000..a4b3500 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_deserialize.py @@ -0,0 +1,83 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use +from typing import ( # pylint: disable=unused-import + Tuple, Dict, List, + TYPE_CHECKING +) + +from ._models import ShareProperties, DirectoryProperties, FileProperties +from ._shared.response_handlers import deserialize_metadata +from ._generated.models import ShareFileRangeList + + +def deserialize_share_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + share_properties = ShareProperties( + metadata=metadata, + **headers + ) + return share_properties + + +def deserialize_directory_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + directory_properties = DirectoryProperties( + metadata=metadata, + **headers + ) + return directory_properties + + +def deserialize_file_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + file_properties = FileProperties( + metadata=metadata, + **headers + ) + if 'Content-Range' in headers: + if 'x-ms-content-md5' in headers: + file_properties.content_settings.content_md5 = headers['x-ms-content-md5'] + else: + file_properties.content_settings.content_md5 = None + return file_properties + + +def deserialize_file_stream(response, obj, headers): + file_properties = deserialize_file_properties(response, obj, headers) + obj.properties = file_properties + return response.location_mode, obj + + +def deserialize_permission(response, obj, headers): # pylint: disable=unused-argument + ''' + Extracts out file permission + ''' + + return obj.permission + + +def deserialize_permission_key(response, obj, headers): # pylint: disable=unused-argument + ''' + Extracts out file permission key + ''' + + if response is None or headers is None: + return None + return headers.get('x-ms-file-permission-key', None) + + +def get_file_ranges_result(ranges): + # type: (ShareFileRangeList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + file_ranges = [] # type: ignore + clear_ranges = [] # type: List + if ranges.ranges: + file_ranges = [ + {'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] # type: ignore + if ranges.clear_ranges: + clear_ranges = [ + {'start': clear_range.start, 'end': clear_range.end} for clear_range in ranges.clear_ranges] + return file_ranges, clear_ranges diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_directory_client.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_directory_client.py new file mode 100644 index 0000000..f1c7c05 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_directory_client.py @@ -0,0 +1,706 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import functools +import time +from typing import ( # pylint: disable=unused-import + Optional, Union, Any, Dict, TYPE_CHECKING +) + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.paging import ItemPaged +from azure.core.pipeline import Pipeline +from azure.core.tracing.decorator import distributed_trace + +from ._generated import AzureFileStorage +from ._generated.version import VERSION +from ._generated.models import StorageErrorException +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._shared.parser import _str +from ._parser import _get_file_permission, _datetime_to_str +from ._deserialize import deserialize_directory_properties +from ._serialize import get_api_version +from ._file_client import ShareFileClient +from ._models import DirectoryPropertiesPaged, HandlesPaged, NTFSAttributes # pylint: disable=unused-import + +if TYPE_CHECKING: + from datetime import datetime + from ._models import ShareProperties, DirectoryProperties, ContentSettings + from ._generated.models import HandleItem + + +class ShareDirectoryClient(StorageAccountHostsMixin): + """A client to interact with a specific directory, although it may not yet exist. + + For operations relating to a specific subdirectory or file in this share, the clients for those + entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the directory, + use the :func:`from_directory_url` classmethod. + :param share_name: + The name of the share for the directory. + :type share_name: str + :param str directory_path: + The directory path for the directory with which to interact. + If specified, this value will override a directory value specified in the directory URL. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + directory_path, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Optional[Any] + ): + # type: (...) 
-> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not share_name: + raise ValueError("Please specify a share name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + if hasattr(credential, 'get_token'): + raise ValueError("Token credentials not supported by the File service.") + + path_snapshot, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + self.share_name = share_name + self.directory_path = directory_path + + self._query_str, credential = self._format_query_string( + sas_token, credential, share_snapshot=self.snapshot) + super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + + @classmethod + def from_directory_url(cls, directory_url, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Optional[Any] + ): + # type: (...) -> ShareDirectoryClient + """Create a ShareDirectoryClient from a directory url. + + :param str directory_url: + The full URI to the directory. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :returns: A directory client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + try: + if not directory_url.lower().startswith('http'): + directory_url = "https://" + directory_url + except AttributeError: + raise ValueError("Directory URL must be a string.") + parsed_url = urlparse(directory_url.rstrip('/')) + if not parsed_url.path and not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(directory_url)) + account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query + path_snapshot, _ = parse_query(parsed_url.query) + + share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/') + share_name = unquote(share_name) + + directory_path = path_dir + snapshot = snapshot or path_snapshot + + return cls( + account_url=account_url, share_name=share_name, directory_path=directory_path, + credential=credential, **kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. 
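+
+        As a rough illustration (the account hostname is a placeholder), a
+        client for share "myshare" and directory path "dir1" would
+        produce::
+
+            https://myaccount.file.core.windows.net/myshare/dir1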
+        """
+        share_name = self.share_name
+        if isinstance(share_name, six.text_type):
+            share_name = share_name.encode('UTF-8')
+        directory_path = ""
+        if self.directory_path:
+            directory_path = "/" + quote(self.directory_path, safe='~')
+        return "{}://{}/{}{}{}".format(
+            self.scheme,
+            hostname,
+            quote(share_name),
+            directory_path,
+            self._query_str)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            share_name,  # type: str
+            directory_path,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> ShareDirectoryClient
+        """Create ShareDirectoryClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param share_name: The name of the share.
+        :type share_name: str
+        :param str directory_path:
+            The directory path.
+        :param credential:
+            The credential with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string or an account
+            shared access key.
+        :returns: A directory client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs)
+
+    def get_file_client(self, file_name, **kwargs):
+        # type: (str, Any) -> ShareFileClient
+        """Get a client to interact with a specific file.
+
+        The file need not already exist.
+
+        :param file_name:
+            The name of the file.
+        :returns: A File Client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+        """
+        if self.directory_path:
+            file_name = self.directory_path.rstrip('/') + "/" + file_name
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return ShareFileClient(
+            self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, **kwargs)
+
+    def get_subdirectory_client(self, directory_name, **kwargs):
+        # type: (str, Any) -> ShareDirectoryClient
+        """Get a client to interact with a specific subdirectory.
+
+        The subdirectory need not already exist.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :returns: A Directory Client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START get_subdirectory_client]
+                :end-before: [END get_subdirectory_client]
+                :language: python
+                :dedent: 12
+                :caption: Gets the subdirectory client.
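+
+        A minimal chained sketch (``parent_dir`` is an illustrative
+        ``ShareDirectoryClient`` and the directory name is a placeholder)::
+
+            subdir = parent_dir.get_subdirectory_client("child")
+            subdir.create_directory()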
+ """ + directory_path = self.directory_path.rstrip('/') + "/" + directory_name + + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, + _location_mode=self._location_mode, **kwargs) + + @distributed_trace + def create_directory(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new directory under the directory referenced by the client. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the directory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 12 + :caption: Creates a directory. + """ + timeout = kwargs.pop('timeout', None) + metadata = kwargs.pop('metadata', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return self._client.directory.create( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def delete_directory(self, **kwargs): + # type: (**Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START delete_directory] + :end-before: [END delete_directory] + :language: python + :dedent: 12 + :caption: Deletes a directory. + """ + timeout = kwargs.pop('timeout', None) + try: + self._client.directory.delete(timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files(self, name_starts_with=None, **kwargs): + # type: (Optional[str], **Any) -> ItemPaged + """Lists all the directories and files under the directory. + + :param str name_starts_with: + Filters the results to return only entities whose names + begin with the specified prefix. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + :rtype: ~azure.core.paging.ItemPaged[DirectoryProperties and FileProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START lists_directory] + :end-before: [END lists_directory] + :language: python + :dedent: 12 + :caption: List directories and files. 
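+
+        A minimal iteration sketch (``dir_client`` is an illustrative
+        ``ShareDirectoryClient``)::
+
+            for item in dir_client.list_directories_and_files():
+                print(item['name'])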
+ """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_files_and_directories_segment, + sharesnapshot=self.snapshot, + timeout=timeout, + **kwargs) + return ItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=DirectoryPropertiesPaged) + + @distributed_trace + def list_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> ItemPaged + """Lists opened handles on a directory or a file under the directory. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of HandleItem + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_handles, + sharesnapshot=self.snapshot, + timeout=timeout, + recursive=recursive, + **kwargs) + return ItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=HandlesPaged) + + @distributed_trace + def close_handle(self, handle, **kwargs): + # type: (Union[str, HandleItem], Any) -> Dict[str, int] + """Close an open file handle. + + :param handle: + A specific handle to close. + :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = self._client.directory.force_close_handles( + handle_id, + marker=None, + recursive=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def close_all_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. 
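+            For example, a completed run might return
+            ``{'closed_handles_count': 5, 'failed_handles_count': 0}``.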
+ :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = self._client.directory.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + recursive=recursive, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } + + @distributed_trace + def get_directory_properties(self, **kwargs): + # type: (Any) -> DirectoryProperties + """Returns all user-defined metadata and system properties for the + specified directory. The data returned does not include the directory's + list of files. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: DirectoryProperties + :rtype: ~azure.storage.fileshare.DirectoryProperties + """ + timeout = kwargs.pop('timeout', None) + try: + response = self._client.directory.get_properties( + timeout=timeout, + cls=deserialize_directory_properties, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return response # type: ignore + + @distributed_trace + def set_directory_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the directory. + + Each call to this operation replaces all existing metadata + attached to the directory. To remove all metadata from the directory, + call this operation with an empty metadata dict. + + :param metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return self._client.directory.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the directory. + + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. 
+ :type file_last_write_time: str or datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + try: + return self._client.directory.set_properties( # type: ignore + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def create_subdirectory( + self, directory_name, # type: str + **kwargs): + # type: (...) -> ShareDirectoryClient + """Creates a new subdirectory and returns a client to interact + with the subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword dict(str,str) metadata: + Name-value pairs associated with the subdirectory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START create_subdirectory] + :end-before: [END create_subdirectory] + :language: python + :dedent: 12 + :caption: Create a subdirectory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) + return subdir # type: ignore + + @distributed_trace + def delete_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> None + """Deletes a subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START delete_subdirectory] + :end-before: [END delete_subdirectory] + :language: python + :dedent: 12 + :caption: Delete a subdirectory. + """ + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + subdir.delete_directory(timeout=timeout, **kwargs) + + @distributed_trace + def upload_file( + self, file_name, # type: str + data, # type: Any + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> ShareFileClient + """Creates a new file in the directory and returns a ShareFileClient + to interact with the file. + + :param str file_name: + The name of the file. 
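+            The name is interpreted relative to this directory; for example,
+            ``"log.txt"`` on a client for directory ``"logs"`` targets
+            ``logs/log.txt`` within the share.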
+ :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: ShareFileClient + :rtype: ~azure.storage.fileshare.ShareFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START upload_file_to_directory] + :end-before: [END upload_file_to_directory] + :language: python + :dedent: 12 + :caption: Upload a file to a directory. + """ + file_client = self.get_file_client(file_name) + file_client.upload_file( + data, + length=length, + **kwargs) + return file_client # type: ignore + + @distributed_trace + def delete_file( + self, file_name, # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> None + """Marks the specified file for deletion. The file is later + deleted during garbage collection. + + :param str file_name: + The name of the file to delete. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START delete_file_in_directory] + :end-before: [END delete_file_in_directory] + :language: python + :dedent: 12 + :caption: Delete a file in a directory. + """ + file_client = self.get_file_client(file_name) + file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_download.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_download.py new file mode 100644 index 0000000..8a86027 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_download.py @@ -0,0 +1,522 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+
+import sys
+import threading
+import warnings
+from io import BytesIO
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.common import with_current_context
+from ._shared.encryption import decrypt_blob
+from ._shared.request_handlers import validate_and_format_range_headers
+from ._shared.response_handlers import process_storage_error, parse_length_from_content_range
+
+
+def process_range_and_offset(start_range, end_range, length, encryption):
+    start_offset, end_offset = 0, 0
+    if encryption.get("key") is not None or encryption.get("resolver") is not None:
+        if start_range is not None:
+            # Align the start of the range along a 16 byte block
+            start_offset = start_range % 16
+            start_range -= start_offset
+
+            # Include an extra 16 bytes for the IV if necessary
+            # Because of the previous offsetting, start_range will always
+            # be a multiple of 16.
+            if start_range > 0:
+                start_offset += 16
+                start_range -= 16
+
+        if length is not None:
+            # Align the end of the range along a 16 byte block
+            end_offset = 15 - (end_range % 16)
+            end_range += end_offset
+
+    return (start_range, end_range), (start_offset, end_offset)
+
+
+def process_content(data, start_offset, end_offset, encryption):
+    if data is None:
+        raise ValueError("Response cannot be None.")
+    try:
+        content = b"".join(list(data))
+    except Exception as error:
+        raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error)
+    if content and (encryption.get("key") is not None or encryption.get("resolver") is not None):
+        try:
+            return decrypt_blob(
+                encryption.get("required"),
+                encryption.get("key"),
+                encryption.get("resolver"),
+                content,
+                start_offset,
+                end_offset,
+                data.response.headers,
+            )
+        except Exception as error:
+            raise HttpResponseError(message="Decryption failed.", response=data.response, error=error)
+    return content
+
+
+class _ChunkDownloader(object):  # pylint: disable=too-many-instance-attributes
+    def __init__(
+        self,
+        client=None,
+        total_size=None,
+        chunk_size=None,
+        current_progress=None,
+        start_range=None,
+        end_range=None,
+        stream=None,
+        parallel=None,
+        validate_content=None,
+        encryption_options=None,
+        **kwargs
+    ):
+        self.client = client
+
+        # Information on the download range/chunk size
+        self.chunk_size = chunk_size
+        self.total_size = total_size
+        self.start_index = start_range
+        self.end_index = end_range
+
+        # The destination that we will write to
+        self.stream = stream
+        self.stream_lock = threading.Lock() if parallel else None
+        self.progress_lock = threading.Lock() if parallel else None
+
+        # For a parallel download, the stream is always seekable, so we note down the current position
+        # in order to seek to the right place when out-of-order chunks come in
+        self.stream_start = stream.tell() if parallel else None
+
+        # Download progress so far
+        self.progress_total = current_progress
+
+        # Encryption
+        self.encryption_options = encryption_options
+
+        # Parameters for each get operation
+        self.validate_content = validate_content
+        self.request_options = kwargs
+
+    def _calculate_range(self, chunk_start):
+        if chunk_start + self.chunk_size > self.end_index:
+            chunk_end = self.end_index
+        else:
+            chunk_end = chunk_start + self.chunk_size
+        return chunk_start, chunk_end
+
+    def get_chunk_offsets(self):
+        index = self.start_index
+        while index < self.end_index:
+            yield index
+            index += self.chunk_size
+
+    def process_chunk(self, chunk_start):
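+        # Clamp this chunk to the requested range, download it, then write it
+        # to the shared stream and record progress; both the stream write and
+        # the progress update take locks when downloading in parallel.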
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        chunk_data = self._download_chunk(chunk_start, chunk_end - 1)
+        length = chunk_end - chunk_start
+        if length > 0:
+            self._write_to_stream(chunk_data, chunk_start)
+            self._update_progress(length)
+
+    def yield_chunk(self, chunk_start):
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        return self._download_chunk(chunk_start, chunk_end - 1)
+
+    def _update_progress(self, length):
+        if self.progress_lock:
+            with self.progress_lock:  # pylint: disable=not-context-manager
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+    def _write_to_stream(self, chunk_data, chunk_start):
+        if self.stream_lock:
+            with self.stream_lock:  # pylint: disable=not-context-manager
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    def _download_chunk(self, chunk_start, chunk_end):
+        download_range, offset = process_range_and_offset(
+            chunk_start, chunk_end, chunk_end, self.encryption_options
+        )
+        range_header, range_validation = validate_and_format_range_headers(
+            download_range[0], download_range[1], check_content_md5=self.validate_content
+        )
+
+        try:
+            _, response = self.client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self.validate_content,
+                data_stream_total=self.total_size,
+                download_stream_current=self.progress_total,
+                **self.request_options
+            )
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
+        return chunk_data
+
+
+class _ChunkIterator(object):
+    """Iterator for chunks in the file download stream."""
+
+    def __init__(self, size, content, downloader):
+        self.size = size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks = None
+        self._complete = (size == 0)
+
+    def __len__(self):
+        return self.size
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        """Iterate through responses."""
+        if self._complete:
+            raise StopIteration("Download complete")
+        if not self._iter_downloader:
+            # If no iterator was supplied, the download completed with
+            # the initial GET, so we just return that data
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+        else:
+            chunk = next(self._iter_chunks)
+            self._current_content = self._iter_downloader.yield_chunk(chunk)
+
+        return self._current_content
+
+    next = __next__  # Python 2 compatibility.
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the file being downloaded.
+    :ivar str path:
+        The full path of the file.
+    :ivar str share:
+        The name of the share where the file is.
+    :ivar ~azure.storage.fileshare.FileProperties properties:
+        The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file.
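+
+    A minimal usage sketch (``file_client`` is an illustrative
+    ``ShareFileClient``; the range and output path are placeholders)::
+
+        downloader = file_client.download_file(offset=0, length=1024)
+        with open("out.bin", "wb") as fh:
+            downloader.readinto(fh)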
+ """ + + def __init__( + self, + client=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + encryption_options=None, + max_concurrency=1, + name=None, + path=None, + share=None, + encoding=None, + **kwargs + ): + self.name = name + self.path = path + self.share = share + self.properties = None + self.size = None + + self._client = client + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._encryption_options = encryption_options or {} + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._response = None + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + self._first_get_size = ( + self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size + ) + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, initial_request_end, self._end_range, self._encryption_options + ) + + self._response = self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.path = self.path + self.properties.share = self.share + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = "bytes {0}-{1}/{2}".format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = process_content( + self._response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + + def __len__(self): + return self.size + + def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content + ) + + try: + location_mode, response = self._client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options + ) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. 
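+            # (The mode is "primary" or "secondary"; pinning it keeps every
+            # subsequent chunk request on the host that served the first one.)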
+            self._location_mode = location_mode
+
+            # Parse the total file size and adjust the download size if ranges
+            # were specified
+            self._file_size = parse_length_from_content_range(response.properties.content_range)
+            if self._end_range is not None:
+                # Use the end range index unless it is over the end of the file
+                self.size = min(self._file_size, self._end_range - self._start_range + 1)
+            elif self._start_range is not None:
+                self.size = self._file_size - self._start_range
+            else:
+                self.size = self._file_size
+
+        except HttpResponseError as error:
+            if self._start_range is None and error.response.status_code == 416:
+                # Get range will fail on an empty file. If the user did not
+                # request a range, do a regular get request in order to get
+                # any properties.
+                try:
+                    _, response = self._client.download(
+                        validate_content=self._validate_content,
+                        data_stream_total=0,
+                        download_stream_current=0,
+                        **self._request_options
+                    )
+                except HttpResponseError as error:
+                    process_storage_error(error)
+
+                # Set the download size to empty
+                self.size = 0
+                self._file_size = 0
+            else:
+                process_storage_error(error)
+
+        # If the file is small, the download is complete at this point.
+        # If file size is large, download the rest of the file in chunks.
+        if response.properties.size == self.size:
+            self._download_complete = True
+        return response
+
+    def chunks(self):
+        if self.size == 0 or self._download_complete:
+            iter_downloader = None
+        else:
+            data_end = self._file_size
+            if self._end_range is not None:
+                # Use the end range index unless it is over the end of the file
+                data_end = min(self._file_size, self._end_range + 1)
+            iter_downloader = _ChunkDownloader(
+                client=self._client,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._first_get_size,
+                start_range=self._initial_range[1] + 1,  # start where the first download ended
+                end_range=data_end,
+                stream=None,
+                parallel=False,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                use_location=self._location_mode,
+                **self._request_options
+            )
+        return _ChunkIterator(
+            size=self.size,
+            content=self._current_content,
+            downloader=iter_downloader)
+
+    def readall(self):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :rtype: bytes or str
+        """
+        stream = BytesIO()
+        self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)
+        return data
+
+    def content_as_bytes(self, max_concurrency=1):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        return self.readall()
+
+    def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """Download the contents of this file, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding to decode the downloaded bytes. Default is UTF-8.
+ :rtype: str + """ + warnings.warn( + "content_as_text is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self._encoding = encoding + return self.readall() + + def readinto(self, stream): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + # The stream must be seekable if parallel download is required + parallel = self._max_concurrency > 1 + if parallel: + error_message = "Target stream handle must be seekable." + if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # Write the content to the user stream + stream.write(self._current_content) + if self._download_complete: + return self.size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + downloader = _ChunkDownloader( + client=self._client, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # Start where the first download ended + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + **self._request_options + ) + if parallel: + import concurrent.futures + executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency) + list(executor.map( + with_current_context(downloader.process_chunk), + downloader.get_chunk_offsets() + )) + else: + for chunk in downloader.get_chunk_offsets(): + downloader.process_chunk(chunk) + return self.size + + def download_to_stream(self, stream, max_concurrency=1): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The properties of the downloaded file. + :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self.readinto(stream) + return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_file_client.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_file_client.py new file mode 100644 index 0000000..631adac --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_file_client.py @@ -0,0 +1,1395 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, too-many-public-methods +import functools +import time +from io import BytesIO +from typing import ( # pylint: disable=unused-import + Optional, Union, IO, List, Dict, Any, Iterable, Tuple, + TYPE_CHECKING +) + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.paging import ItemPaged # pylint: disable=ungrouped-imports +from azure.core.tracing.decorator import distributed_trace + +from ._generated import AzureFileStorage +from ._generated.version import VERSION +from ._generated.models import StorageErrorException, FileHTTPHeaders +from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks +from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, get_length +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._shared.parser import _str +from ._parser import _get_file_permission, _datetime_to_str +from ._lease import ShareLeaseClient +from ._serialize import get_source_conditions, get_access_conditions, get_smb_properties, get_api_version +from ._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result +from ._models import HandlesPaged, NTFSAttributes # pylint: disable=unused-import +from ._download import StorageStreamDownloader + +if TYPE_CHECKING: + from datetime import datetime + from ._models import ShareProperties, ContentSettings, FileProperties, Handle + from ._generated.models import HandleItem + + +def _upload_file_helper( + client, + stream, + size, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + file_settings, + file_attributes="none", + file_creation_time="now", + file_last_write_time="now", + file_permission=None, + file_permission_key=None, + **kwargs): + try: + if size is None or size < 0: + raise ValueError("A content size must be specified for a File.") + response = client.create_file( + size, + content_settings=content_settings, + metadata=metadata, + timeout=timeout, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + permission_key=file_permission_key, + **kwargs + ) + if size == 0: + return response + + responses = upload_data_chunks( + service=client, + uploader_class=FileChunkUploader, + total_size=size, + chunk_size=file_settings.max_range_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + timeout=timeout, + **kwargs + ) + return sorted(responses, key=lambda r: r.get('last_modified'))[-1] + except StorageErrorException as error: + process_storage_error(error) + + +class ShareFileClient(StorageAccountHostsMixin): + """A client to interact with a specific file, although that file may not yet exist. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the + file, use the :func:`from_file_url` classmethod. + :param share_name: + The name of the share for the file. + :type share_name: str + :param str file_path: + The file path to the file with which to interact. If specified, this value will override + a file value specified in the file URL. 
+ :param str snapshot: + An optional file snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + file_path, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not (share_name and file_path): + raise ValueError("Please specify a share name and file name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + if hasattr(credential, 'get_token'): + raise ValueError("Token credentials not supported by the File service.") + + path_snapshot = None + path_snapshot, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + self.share_name = share_name + self.file_path = file_path.split('/') + self.file_name = self.file_path[-1] + self.directory_path = "/".join(self.file_path[:-1]) + + self._query_str, credential = self._format_query_string( + sas_token, credential, share_snapshot=self.snapshot) + super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + + @classmethod + def from_file_url( + cls, file_url, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> ShareFileClient + """A client to interact with a specific file, although that file may not yet exist. + + :param str file_url: The full URI to the file. + :param str snapshot: + An optional file snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :returns: A File client. 
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+        """
+        try:
+            if not file_url.lower().startswith('http'):
+                file_url = "https://" + file_url
+        except AttributeError:
+            raise ValueError("File URL must be a string.")
+        parsed_url = urlparse(file_url.rstrip('/'))
+
+        if not (parsed_url.netloc and parsed_url.path):
+            raise ValueError("Invalid URL: {}".format(file_url))
+        account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query
+
+        path_share, _, path_file = parsed_url.path.lstrip('/').partition('/')
+        path_snapshot, _ = parse_query(parsed_url.query)
+        snapshot = snapshot or path_snapshot
+        share_name = unquote(path_share)
+        file_path = '/'.join([unquote(p) for p in path_file.split('/')])
+        return cls(account_url, share_name, file_path, snapshot, credential, **kwargs)
+
+    def _format_url(self, hostname):
+        """Format the endpoint URL according to the current location
+        mode hostname.
+        """
+        share_name = self.share_name
+        if isinstance(share_name, six.text_type):
+            share_name = share_name.encode('UTF-8')
+        return "{}://{}/{}/{}{}".format(
+            self.scheme,
+            hostname,
+            quote(share_name),
+            "/".join([quote(p, safe='~') for p in self.file_path]),
+            self._query_str)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            share_name,  # type: str
+            file_path,  # type: str
+            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> ShareFileClient
+        """Create ShareFileClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param share_name: The name of the share.
+        :type share_name: str
+        :param str file_path:
+            The file path.
+        :param str snapshot:
+            An optional file snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credential with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string or an account
+            shared access key.
+        :returns: A File client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world.py
+                :start-after: [START create_file_client]
+                :end-before: [END create_file_client]
+                :language: python
+                :dedent: 12
+                :caption: Creates the file client with connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs)
+
+    @distributed_trace
+    def acquire_lease(self, lease_id=None, **kwargs):
+        # type: (Optional[str], **Any) -> ShareLeaseClient
+        """Requests a new lease.
+
+        If the file does not have an active lease, the File
+        Service creates a lease on the file and returns a new lease.
+
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The File Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A ShareLeaseClient object.
+        :rtype: ~azure.storage.fileshare.ShareLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START acquire_and_release_lease_on_file]
+                :end-before: [END acquire_and_release_lease_on_file]
+                :language: python
+                :dedent: 12
+                :caption: Acquiring a lease on a file.
+        """
+        kwargs['lease_duration'] = -1
+        lease = ShareLeaseClient(self, lease_id=lease_id)  # type: ignore
+        lease.acquire(**kwargs)
+        return lease
+
+    @distributed_trace
+    def create_file(  # type: ignore
+            self, size,  # type: int
+            file_attributes="none",  # type: Union[str, NTFSAttributes]
+            file_creation_time="now",  # type: Union[str, datetime]
+            file_last_write_time="now",  # type: Union[str, datetime]
+            file_permission=None,  # type: Optional[str]
+            permission_key=None,  # type: Optional[str]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> Dict[str, Any]
+        """Creates a new file.
+
+        Note that it only initializes the file with no content.
+
+        :param int size: Specifies the maximum size for the file,
+            up to 1 TiB.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value is "None" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            file_attributes value is not case sensitive.
+        :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
+        :param file_creation_time: Creation time for the file
+            Default value: Now.
+        :type file_creation_time: str or ~datetime.datetime
+        :param file_last_write_time: Last write time for the file
+            Default value: Now.
+        :type file_last_write_time: str or ~datetime.datetime
+        :param file_permission: If specified the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword dict(str,str) metadata:
+            Name-value pairs associated with the file as metadata.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START create_file]
+                :end-before: [END create_file]
+                :language: python
+                :dedent: 12
+                :caption: Create a file.
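+
+        A minimal sketch (the size is illustrative; the file is initialized
+        with no content)::
+
+            file_client.create_file(size=1024)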
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + content_settings = kwargs.pop('content_settings', None) + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + file_http_headers = None + if content_settings: + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition + ) + file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') + try: + return self._client.file.create( # type: ignore + file_content_length=size, + metadata=metadata, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + file_http_headers=file_http_headers, + lease_access_conditions=access_conditions, + headers=headers, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def upload_file( + self, data, # type: Any + length=None, # type: Optional[int] + file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="now", # type: Union[str, datetime] + file_last_write_time="now", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Uploads a new file. + + :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. 
+ :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START upload_file] + :end-before: [END upload_file] + :language: python + :dedent: 12 + :caption: Upload a file. + """ + metadata = kwargs.pop('metadata', None) + content_settings = kwargs.pop('content_settings', None) + max_concurrency = kwargs.pop('max_concurrency', 1) + validate_content = kwargs.pop('validate_content', False) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + + if isinstance(data, six.text_type): + data = data.encode(encoding) + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, 'read'): + stream = data + elif hasattr(data, '__iter__'): + stream = IterStreamer(data, encoding=encoding) # type: ignore + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + return _upload_file_helper( # type: ignore + self, + stream, + length, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + self._config, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + file_permission_key=permission_key, + **kwargs) + + @distributed_trace + def start_copy_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Any + """Initiates the copying of data from a source URL into the file + referenced by the client. + + The status of this copy operation can be found using the `get_properties` + method. + + :param str source_url: + Specifies the URL of the source file. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. This setting can be + used if Permission size is <= 8KB, otherwise permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. 
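+            (An SDDL string has the general shape ``"O:BAG:SYD:(A;;FA;;;WD)"``;
+            this example is illustrative only.)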
+            Note: Only one of the file_permission or permission_key should be specified.
+
+            .. versionadded:: 12.1.0
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword str permission_key:
+            Key of the permission to be set for the directory/file.
+            This value can be set to "source" to copy the security descriptor from the source file.
+            Otherwise if set, this value will be used to override the source value. If not set, permission value
+            is inherited from the parent directory of the target file.
+            Note: Only one of the file_permission or permission_key should be specified.
+
+            .. versionadded:: 12.1.0
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword file_attributes:
+            This value can be set to "source" to copy file attributes from the source file to the target file,
+            or it can be set to "None" to clear all attributes. Otherwise it can be set to a list of attributes
+            to set on the target file. If this is not set, the default value is "Archive".
+
+            .. versionadded:: 12.1.0
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
+        :keyword file_creation_time:
+            This value can be set to "source" to copy the creation time from the source file to the target file,
+            or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format.
+            If this is not set, creation time will be set to the date time value of the creation
+            (or when it was overwritten) of the target file by the copy engine.
+
+            .. versionadded:: 12.1.0
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_creation_time: str or ~datetime.datetime
+        :keyword file_last_write_time:
+            This value can be set to "source" to copy the last write time from the source file to the target file, or
+            a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format.
+            If this is not set, the value will be the time of the last write to the file by the copy engine.
+
+            .. versionadded:: 12.1.0
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_last_write_time: str or ~datetime.datetime
+        :keyword bool ignore_read_only:
+            Specifies the option to overwrite the target file if it already exists and has the read-only
+            attribute set.
+
+            .. versionadded:: 12.1.0
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword bool set_archive_attribute:
+            Specifies the option to set the archive attribute on the target file.
+            True means the archive attribute will be set on the target file despite attribute
+            overrides or the source file state.
+
+            .. versionadded:: 12.1.0
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: dict(str, Any)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START copy_file_from_url]
+                :end-before: [END copy_file_from_url]
+                :language: python
+                :dedent: 12
+                :caption: Copy a file from a URL.
+        """
+        metadata = kwargs.pop('metadata', None)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        kwargs.update(get_smb_properties(kwargs))
+        try:
+            return self._client.file.start_copy(
+                source_url,
+                metadata=metadata,
+                lease_access_conditions=access_conditions,
+                headers=headers,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
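+
+    # A minimal usage sketch ('file_client' is an assumed, already-constructed
+    # client and the SAS URL is a placeholder). The copy runs service-side, so
+    # its status is polled through get_properties():
+    #
+    #   file_client.start_copy_from_url(
+    #       'https://myaccount.file.core.windows.net/share/src?<sas-token>')
+    #   status = file_client.get_properties().copy.status  # e.g. 'pending'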
+
+    @distributed_trace
+    def abort_copy(self, copy_id, **kwargs):
+        # type: (Union[str, FileProperties], Any) -> None
+        """Abort an ongoing copy operation.
+
+        This will leave a destination file with zero length and full metadata.
+        This will raise an error if the copy operation has already ended.
+
+        :param copy_id:
+            The copy operation to abort. This can be either an ID or an
+            instance of FileProperties.
+        :type copy_id: str or ~azure.storage.fileshare.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            copy_id = copy_id.copy.id
+        except AttributeError:
+            try:
+                copy_id = copy_id['copy_id']
+            except TypeError:
+                pass
+        try:
+            self._client.file.abort_copy(copy_id=copy_id,
+                                         lease_access_conditions=access_conditions,
+                                         timeout=timeout, **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def download_file(
+            self, offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            **kwargs
+        ):
+        # type: (...) -> Iterable[bytes]
+        """Downloads a file to a stream with automatic chunking.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file. Also note that if enabled, the memory-efficient algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable data generator (stream)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START download_file]
+                :end-before: [END download_file]
+                :language: python
+                :dedent: 12
+                :caption: Download a file.
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Encryption not supported.")
+        if length is not None and offset is None:
+            raise ValueError("Offset value must not be None if length is set.")
+
+        range_end = None
+        if length is not None:
+            range_end = offset + length - 1  # Service actually uses an end-range inclusive index
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        return StorageStreamDownloader(
+            client=self._client.file,
+            config=self._config,
+            start_range=offset,
+            end_range=range_end,
+            encryption_options=None,
+            name=self.file_name,
+            path='/'.join(self.file_path),
+            share=self.share_name,
+            lease_access_conditions=access_conditions,
+            cls=deserialize_file_stream,
+            **kwargs)
+
+    @distributed_trace
+    def delete_file(self, **kwargs):
+        # type: (Any) -> None
+        """Marks the specified file for deletion. The file is
+        later deleted during garbage collection.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 12
+                :caption: Delete a file.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_file_properties(self, **kwargs):
+        # type: (Any) -> FileProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: FileProperties
+        :rtype: ~azure.storage.fileshare.FileProperties
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            file_props = self._client.file.get_properties(
+                sharesnapshot=self.snapshot,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=deserialize_file_properties,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        file_props.name = self.file_name
+        file_props.share = self.share_name
+        file_props.snapshot = self.snapshot
+        file_props.path = '/'.join(self.file_path)
+        return file_props  # type: ignore
+
+    @distributed_trace
+    def set_http_headers(self, content_settings,  # type: ContentSettings
+                         file_attributes="preserve",  # type: Union[str, NTFSAttributes]
+                         file_creation_time="preserve",  # type: Union[str, datetime]
+                         file_last_write_time="preserve",  # type: Union[str, datetime]
+                         file_permission=None,  # type: Optional[str]
+                         permission_key=None,  # type: Optional[str]
+                         **kwargs  # type: Any
+                         ):
+        # type: (...) -> Dict[str, Any]
+        """Sets HTTP headers on the file.
+
+        :param ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, indicates preservation of existing values.
+            Example when given as a str: 'Temporary|Archive'.
+        :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
+        :param file_creation_time: Creation time for the file.
+            Default value: Preserve.
+        :type file_creation_time: str or ~datetime.datetime
+        :param file_last_write_time: Last write time for the file.
+            Default value: Preserve.
+        :type file_last_write_time: str or ~datetime.datetime
+        :param file_permission: If specified, the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Preserve. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        file_content_length = kwargs.pop('size', None)
+        file_http_headers = FileHTTPHeaders(
+            file_cache_control=content_settings.cache_control,
+            file_content_type=content_settings.content_type,
+            file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+            file_content_encoding=content_settings.content_encoding,
+            file_content_language=content_settings.content_language,
+            file_content_disposition=content_settings.content_disposition
+        )
+        file_permission = _get_file_permission(file_permission, permission_key, 'preserve')
+        try:
+            return self._client.file.set_http_headers(  # type: ignore
+                file_content_length=file_content_length,
+                file_http_headers=file_http_headers,
+                file_attributes=_str(file_attributes),
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
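+
+    # A minimal usage sketch (assumed names: 'file_client' is an existing
+    # client; ContentSettings is assumed importable from the package root).
+    # Attributes and times keep their "preserve" defaults, so only the named
+    # HTTP headers change:
+    #
+    #   file_client.set_http_headers(
+    #       ContentSettings(content_type='application/json'))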
+
+    @distributed_trace
+    def set_file_metadata(self, metadata=None, **kwargs):
+        # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any]
+        """Sets user-defined metadata for the specified file as one or more
+        name-value pairs.
+
+        Each call to this operation replaces all existing metadata
+        attached to the file. To remove all metadata from the file,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))  # type: ignore
+        try:
+            return self._client.file.set_metadata(  # type: ignore
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                metadata=metadata,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def upload_range(  # type: ignore
+            self, data,  # type: bytes
+            offset,  # type: int
+            length,  # type: int
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        """Upload a range of bytes to a file.
+
+        :param bytes data:
+            The data to upload.
+        :param int offset:
+            Start of byte range to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the range content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        validate_content = kwargs.pop('validate_content', False)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Encryption not supported.")
+        if isinstance(data, six.text_type):
+            data = data.encode(encoding)
+
+        end_range = offset + length - 1  # Reformat to an inclusive range index
+        content_range = 'bytes={0}-{1}'.format(offset, end_range)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        try:
+            return self._client.file.upload_range(  # type: ignore
+                range=content_range,
+                content_length=length,
+                optionalbody=data,
+                timeout=timeout,
+                validate_content=validate_content,
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _upload_range_from_url_options(source_url,  # type: str
+                                       offset,  # type: int
+                                       length,  # type: int
+                                       source_offset,  # type: int
+                                       **kwargs  # type: Any
+                                       ):
+        # type: (...) -> Dict[str, Any]
+
+        if offset is None:
+            raise ValueError("offset must be provided.")
+        if length is None:
+            raise ValueError("length must be provided.")
+        if source_offset is None:
+            raise ValueError("source_offset must be provided.")
+
+        # Format range
+        end_range = offset + length - 1
+        destination_range = 'bytes={0}-{1}'.format(offset, end_range)
+        source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1)
+
+        source_mod_conditions = get_source_conditions(kwargs)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        options = {
+            'copy_source': source_url,
+            'content_length': 0,
+            'source_range': source_range,
+            'range': destination_range,
+            'source_modified_access_conditions': source_mod_conditions,
+            'lease_access_conditions': access_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def upload_range_from_url(self, source_url,
+                              offset,
+                              length,
+                              source_offset,
+                              **kwargs
+                              ):
+        # type: (str, int, int, int, **Any) -> Dict[str, Any]
+        """
+        Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint.
+
+        :param int offset:
+            Start of byte range to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies an Azure file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.file.core.windows.net/myshare/mydir/myfile
+            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+            The service will read the same number of bytes as the destination range (length bytes).
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to write the range only if the source
+            resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to write the range only if the source
+            resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        """
+        options = self._upload_range_from_url_options(
+            source_url=source_url,
+            offset=offset,
+            length=length,
+            source_offset=source_offset,
+            **kwargs
+        )
+        try:
+            return self._client.file.upload_range_from_url(**options)  # type: ignore
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def _get_ranges_options(  # type: ignore
+            self, offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            previous_sharesnapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Unsupported method for encryption.")
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        content_range = None
+        if offset is not None:
+            if length is not None:
+                end_range = offset + length - 1  # Reformat to an inclusive range index
+                content_range = 'bytes={0}-{1}'.format(offset, end_range)
+            else:
+                content_range = 'bytes={0}-'.format(offset)
+        options = {
+            'sharesnapshot': self.snapshot,
+            'lease_access_conditions': access_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'range': content_range}
+        if previous_sharesnapshot:
+            try:
+                options['prevsharesnapshot'] = previous_sharesnapshot.snapshot  # type: ignore
+            except AttributeError:
+                try:
+                    options['prevsharesnapshot'] = previous_sharesnapshot['snapshot']  # type: ignore
+                except TypeError:
+                    options['prevsharesnapshot'] = previous_sharesnapshot
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def get_ranges(  # type: ignore
+            self, offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> List[Dict[str, int]]
+        """Returns the list of valid ranges for a file or snapshot
+        of a file.
+
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes to use over which to get ranges.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            A list of valid ranges.
+        :rtype: List[dict[str, int]]
+        """
+        options = self._get_ranges_options(
+            offset=offset,
+            length=length,
+            **kwargs)
+        try:
+            ranges = self._client.file.get_range_list(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges]
+
+    @distributed_trace
+    def get_ranges_diff(  # type: ignore
+            self,
+            previous_sharesnapshot,  # type: Union[str, Dict[str, Any]]
+            offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+        """Returns the list of valid ranges for a file or snapshot
+        of a file.
+
+        .. versionadded:: 12.6.0
+
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes to use over which to get ranges.
+        :param str previous_sharesnapshot:
+            The snapshot diff parameter that contains an opaque DateTime value that
+            specifies a previous file snapshot to be compared
+            against a more recent snapshot or the current file.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled file ranges, the second element is the cleared file ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        options = self._get_ranges_options(
+            offset=offset,
+            length=length,
+            previous_sharesnapshot=previous_sharesnapshot,
+            **kwargs)
+        try:
+            ranges = self._client.file.get_range_list(**options)
+        except StorageErrorException as error:
+            process_storage_error(error)
+        return get_file_ranges_result(ranges)
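+
+    # A minimal usage sketch (assumed 'file_client'): each returned range is a
+    # dict with inclusive 'start' and 'end' byte offsets, as built above.
+    #
+    #   for file_range in file_client.get_ranges():
+    #       print(file_range['start'], file_range['end'])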
+
+    @distributed_trace
+    def clear_range(  # type: ignore
+            self, offset,  # type: int
+            length,  # type: int
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        """Clears the specified range and releases the space used in storage for
+        that range.
+
+        :param int offset:
+            Start of byte range to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Unsupported method for encryption.")
+
+        if offset is None or offset % 512 != 0:
+            raise ValueError("offset must be an integer that aligns to a 512-byte boundary")
+        if length is None or length % 512 != 0:
+            raise ValueError("length must be an integer that aligns to a 512-byte boundary")
+        end_range = length + offset - 1  # Reformat to an inclusive range index
+        content_range = 'bytes={0}-{1}'.format(offset, end_range)
+        try:
+            return self._client.file.upload_range(  # type: ignore
+                timeout=timeout,
+                cls=return_response_headers,
+                content_length=0,
+                file_range_write="clear",
+                range=content_range,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def resize_file(self, size, **kwargs):
+        # type: (int, Any) -> Dict[str, Any]
+        """Resizes a file to the specified size.
+
+        :param int size:
+            Size to resize the file to (in bytes).
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return self._client.file.set_http_headers(  # type: ignore
+                file_content_length=size,
+                file_attributes="preserve",
+                file_creation_time="preserve",
+                file_last_write_time="preserve",
+                file_permission="preserve",
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_handles(self, **kwargs):
+        # type: (Any) -> ItemPaged[Handle]
+        """Lists handles for the file.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An auto-paging iterable of HandleItem
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem]
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.file.list_handles,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=HandlesPaged)
+
+    @distributed_trace
+    def close_handle(self, handle, **kwargs):
+        # type: (Union[str, HandleItem], Any) -> Dict[str, int]
+        """Close an open file handle.
+
+        :param handle:
+            A specific handle to close.
+        :type handle: str or ~azure.storage.fileshare.Handle
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        try:
+            handle_id = handle.id  # type: ignore
+        except AttributeError:
+            handle_id = handle
+        if handle_id == '*':
+            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
+        try:
+            response = self._client.file.force_close_handles(
+                handle_id,
+                marker=None,
+                sharesnapshot=self.snapshot,
+                cls=return_response_headers,
+                **kwargs
+            )
+            return {
+                'closed_handles_count': response.get('number_of_handles_closed', 0),
+                'failed_handles_count': response.get('number_of_handles_failed', 0)
+            }
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def close_all_handles(self, **kwargs):
+        # type: (Any) -> Dict[str, int]
+        """Close any open file handles.
+
+        This operation will block until the service has closed all open handles.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        timeout = kwargs.pop('timeout', None)
+        start_time = time.time()
+
+        try_close = True
+        continuation_token = None
+        total_closed = 0
+        total_failed = 0
+        while try_close:
+            try:
+                response = self._client.file.force_close_handles(
+                    handle_id='*',
+                    timeout=timeout,
+                    marker=continuation_token,
+                    sharesnapshot=self.snapshot,
+                    cls=return_response_headers,
+                    **kwargs
+                )
+            except StorageErrorException as error:
+                process_storage_error(error)
+            continuation_token = response.get('marker')
+            try_close = bool(continuation_token)
+            total_closed += response.get('number_of_handles_closed', 0)
+            total_failed += response.get('number_of_handles_failed', 0)
+            if timeout:
+                timeout = max(0, timeout - (time.time() - start_time))
+        return {
+            'closed_handles_count': total_closed,
+            'failed_handles_count': total_failed
+        }
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/__init__.py
new file mode 100644
index 0000000..22b5762
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/__init__.py
@@ -0,0 +1,18 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._azure_file_storage import AzureFileStorage
+__all__ = ['AzureFileStorage']
+
+from .version import VERSION
+
+__version__ = VERSION
+
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_azure_file_storage.py
new file mode 100644
index 0000000..3c52986
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_azure_file_storage.py
@@ -0,0 +1,71 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# -------------------------------------------------------------------------- + +from azure.core import PipelineClient +from msrest import Serializer, Deserializer + +from ._configuration import AzureFileStorageConfiguration +from azure.core.exceptions import map_error +from .operations import ServiceOperations +from .operations import ShareOperations +from .operations import DirectoryOperations +from .operations import FileOperations +from . import models + + +class AzureFileStorage(object): + """AzureFileStorage + + + :ivar service: Service operations + :vartype service: azure.storage.fileshare.operations.ServiceOperations + :ivar share: Share operations + :vartype share: azure.storage.fileshare.operations.ShareOperations + :ivar directory: Directory operations + :vartype directory: azure.storage.fileshare.operations.DirectoryOperations + :ivar file: File operations + :vartype file: azure.storage.fileshare.operations.FileOperations + + :param version: Specifies the version of the operation to use for this + request. + :type version: str + :param url: The URL of the service account, share, directory or file that + is the target of the desired operation. + :type url: str + """ + + def __init__(self, version, url, **kwargs): + + base_url = '{url}' + self._config = AzureFileStorageConfiguration(version, url, **kwargs) + self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2020-02-10' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.service = ServiceOperations( + self._client, self._config, self._serialize, self._deserialize) + self.share = ShareOperations( + self._client, self._config, self._serialize, self._deserialize) + self.directory = DirectoryOperations( + self._client, self._config, self._serialize, self._deserialize) + self.file = FileOperations( + self._client, self._config, self._serialize, self._deserialize) + + def close(self): + self._client.close() + def __enter__(self): + self._client.__enter__() + return self + def __exit__(self, *exc_details): + self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_configuration.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_configuration.py new file mode 100644 index 0000000..d638b1e --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_configuration.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +from .version import VERSION + + +class AzureFileStorageConfiguration(Configuration): + """Configuration for AzureFileStorage + Note that all parameters used to create this instance are saved as instance + attributes. + + :param version: Specifies the version of the operation to use for this + request. 
+ :type version: str + :param url: The URL of the service account, share, directory or file that + is the target of the desired operation. + :type url: str + :ivar file_range_write_from_url: Only update is supported: - Update: + Writes the bytes downloaded from the source url into the specified range. + :type file_range_write_from_url: str + """ + + def __init__(self, version, url, **kwargs): + + if version is None: + raise ValueError("Parameter 'version' must not be None.") + if url is None: + raise ValueError("Parameter 'url' must not be None.") + + super(AzureFileStorageConfiguration, self).__init__(**kwargs) + self._configure(**kwargs) + + self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION)) + self.generate_client_request_id = True + + self.version = version + self.url = url + self.file_range_write_from_url = "update" + + def _configure(self, **kwargs): + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/__init__.py new file mode 100644 index 0000000..942d3c5 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/__init__.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from ._azure_file_storage_async import AzureFileStorage +__all__ = ['AzureFileStorage'] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_azure_file_storage_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_azure_file_storage_async.py new file mode 100644 index 0000000..c0fcb43 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_azure_file_storage_async.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from azure.core import AsyncPipelineClient +from msrest import Serializer, Deserializer + +from ._configuration_async import AzureFileStorageConfiguration +from azure.core.exceptions import map_error +from .operations_async import ServiceOperations +from .operations_async import ShareOperations +from .operations_async import DirectoryOperations +from .operations_async import FileOperations +from .. import models + + +class AzureFileStorage(object): + """AzureFileStorage + + + :ivar service: Service operations + :vartype service: azure.storage.fileshare.aio.operations_async.ServiceOperations + :ivar share: Share operations + :vartype share: azure.storage.fileshare.aio.operations_async.ShareOperations + :ivar directory: Directory operations + :vartype directory: azure.storage.fileshare.aio.operations_async.DirectoryOperations + :ivar file: File operations + :vartype file: azure.storage.fileshare.aio.operations_async.FileOperations + + :param version: Specifies the version of the operation to use for this + request. + :type version: str + :param url: The URL of the service account, share, directory or file that + is the target of the desired operation. + :type url: str + """ + + def __init__( + self, version, url, **kwargs): + + base_url = '{url}' + self._config = AzureFileStorageConfiguration(version, url, **kwargs) + self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2020-02-10' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.service = ServiceOperations( + self._client, self._config, self._serialize, self._deserialize) + self.share = ShareOperations( + self._client, self._config, self._serialize, self._deserialize) + self.directory = DirectoryOperations( + self._client, self._config, self._serialize, self._deserialize) + self.file = FileOperations( + self._client, self._config, self._serialize, self._deserialize) + + async def close(self): + await self._client.close() + async def __aenter__(self): + await self._client.__aenter__() + return self + async def __aexit__(self, *exc_details): + await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_configuration_async.py new file mode 100644 index 0000000..75c206e --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_configuration_async.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +from ..version import VERSION + + +class AzureFileStorageConfiguration(Configuration): + """Configuration for AzureFileStorage + Note that all parameters used to create this instance are saved as instance + attributes. 
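+
+    A minimal construction sketch (the account URL below is a placeholder)::
+
+        config = AzureFileStorageConfiguration(
+            version='2020-02-10',
+            url='https://myaccount.file.core.windows.net')
+        assert config.version == '2020-02-10'  # kept as an instance attribute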
+ + :param version: Specifies the version of the operation to use for this + request. + :type version: str + :param url: The URL of the service account, share, directory or file that + is the target of the desired operation. + :type url: str + :ivar file_range_write_from_url: Only update is supported: - Update: + Writes the bytes downloaded from the source url into the specified range. + :type file_range_write_from_url: str + """ + + def __init__(self, version, url, **kwargs): + + if version is None: + raise ValueError("Parameter 'version' must not be None.") + if url is None: + raise ValueError("Parameter 'url' must not be None.") + + super(AzureFileStorageConfiguration, self).__init__(**kwargs) + self._configure(**kwargs) + + self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION)) + self.generate_client_request_id = True + self.accept_language = None + + self.version = version + self.url = url + self.file_range_write_from_url = "update" + + def _configure(self, **kwargs): + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/__init__.py new file mode 100644 index 0000000..601c709 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/__init__.py @@ -0,0 +1,22 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations_async import ServiceOperations +from ._share_operations_async import ShareOperations +from ._directory_operations_async import DirectoryOperations +from ._file_operations_async import FileOperations + +__all__ = [ + 'ServiceOperations', + 'ShareOperations', + 'DirectoryOperations', + 'FileOperations', +] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py new file mode 100644 index 0000000..26a962f --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py @@ -0,0 +1,672 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class DirectoryOperations: + """DirectoryOperations async operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar restype: . Constant value: "directory". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.restype = "directory" + + async def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs): + """Creates a new directory under the specified share or parent directory. + + :param file_attributes: If specified, the provided file attributes + shall be set. Default value: ‘Archive’ for file and ‘Directory’ for + directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. + Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. + Default value: Now. + :type file_last_write_time: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
+ :type file_permission_key: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata 
= {'url': '/{shareName}/{directory}'} + + async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs): + """Returns all system properties for the specified directory, and can also + be used to check the existence of a directory. The data returned does + not include the files in the directory or any subdirectories. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return 
cls(response, None, response_headers) + get_properties.metadata = {'url': '/{shareName}/{directory}'} + + async def delete(self, timeout=None, *, cls=None, **kwargs): + """Removes the specified empty directory. Note that the directory must be + empty before it can be deleted. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{shareName}/{directory}'} + + async def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs): + """Sets properties on the directory. + + :param file_attributes: If specified, the provided file attributes + shall be set. Default value: ‘Archive’ for file and ‘Directory’ for + directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. + Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. + Default value: Now. + :type file_last_write_time: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. 
+ :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.set_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', 
response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_properties.metadata = {'url': '/{shareName}/{directory}'} + + async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs): + """Updates user defined metadata for the specified directory. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}/{directory}'} + + async def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, *, cls=None, **kwargs): + """Returns a list of files or directories under the specified share or + directory. It lists the contents only for a single level of the + directory hierarchy. + + :param prefix: Filters the results to return only entries whose name + begins with the specified prefix. + :type prefix: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. 
+ :type sharesnapshot: str + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListFilesAndDirectoriesSegmentResponse or the result of + cls(response) + :rtype: + ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "list" + + # Construct URL + url = self.list_files_and_directories_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response) + header_dict = { + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return 
deserialized + list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} + + async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs): + """Lists handles for the directory. + + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param recursive: Specifies whether the operation should apply to the + directory specified in the URI, its files, its subdirectories and their files. + :type recursive: bool + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListHandlesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "listhandles" + + # Construct URL + url = self.list_handles.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + if recursive is not None: + header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListHandlesResponse', response) + header_dict = { + 'Content-Type': self._deserialize('str', 
response.headers.get('Content-Type')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_handles.metadata = {'url': '/{shareName}/{directory}'} + + async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs): + """Closes all handles open for the given directory. + + :param handle_id: Specifies handle ID opened on the file or directory + to be closed. Asterisk (‘*’) is a wildcard that specifies all handles. + :type handle_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param recursive: Specifies whether the operation should apply to the + directory specified in the URI, its files, its subdirectories and their files. + :type recursive: bool + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "forceclosehandles" + + # Construct URL + url = self.force_close_handles.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') + if recursive is not None: + header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, 
self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), + 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), + 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + force_close_handles.metadata = {'url': '/{shareName}/{directory}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_file_operations_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_file_operations_async.py new file mode 100644 index 0000000..51d7614 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_file_operations_async.py @@ -0,0 +1,1671 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class FileOperations: + """FileOperations async operations. + + You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file". + :ivar x_ms_copy_action: Constant value: "abort". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.x_ms_type = "file" + self.x_ms_copy_action = "abort" + + async def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Creates a new file or replaces a file. Note it only initializes the + file with no content. + + :param file_content_length: Specifies the maximum size for the file, + up to 4 TB. + :type file_content_length: long + :param file_attributes: If specified, the provided file attributes + shall be set. Default value: ‘Archive’ for file and ‘Directory’ for + directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. + Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. + Default value: Now. 
+ :type file_last_write_time: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_http_headers: Additional parameters for the operation + :type file_http_headers: + ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_content_type = None + if file_http_headers is not None: + file_content_type = file_http_headers.file_content_type + file_content_encoding = None + if file_http_headers is not None: + file_content_encoding = file_http_headers.file_content_encoding + file_content_language = None + if file_http_headers is not None: + file_content_language = file_http_headers.file_content_language + file_cache_control = None + if file_http_headers is not None: + file_cache_control = file_http_headers.file_cache_control + file_content_md5 = None + if file_http_headers is not None: + file_content_md5 = file_http_headers.file_content_md5 + file_content_disposition = None + if file_http_headers is not None: + file_content_disposition = file_http_headers.file_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = 
self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') + if file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') + if file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') + if file_cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') + if file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') + if file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def download(self, timeout=None, range=None, range_get_content_md5=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Reads or downloads a 
file from the system, including its metadata and + properties. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param range: Return file data only from the specified byte range. + :type range: str + :param range_get_content_md5: When this header is set to true and + specified together with the Range header, the service returns the MD5 + hash for the range, as long as the range is less than or equal to 4 MB + in size. + :type range_get_content_md5: bool + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.download.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + await response.load_body() + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Returns all user-defined metadata, standard HTTP properties, and system + properties for the file. It does not return the content of the file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': 
self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def delete(self, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Removes the file from the storage account. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Sets HTTP headers on the file. + + :param file_attributes: If specified, the provided file attributes + shall be set. Default value: ‘Archive’ for file and ‘Directory’ for + directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. + Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. + Default value: Now. + :type file_last_write_time: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param file_content_length: Resizes a file to the specified size. If + the specified byte value is less than the current size of the file, + then all ranges above the specified byte value are cleared. + :type file_content_length: long + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
+ :type file_permission_key: str + :param file_http_headers: Additional parameters for the operation + :type file_http_headers: + ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_content_type = None + if file_http_headers is not None: + file_content_type = file_http_headers.file_content_type + file_content_encoding = None + if file_http_headers is not None: + file_content_encoding = file_http_headers.file_content_encoding + file_content_language = None + if file_http_headers is not None: + file_content_language = file_http_headers.file_content_language + file_cache_control = None + if file_http_headers is not None: + file_cache_control = file_http_headers.file_cache_control + file_content_md5 = None + if file_http_headers is not None: + file_content_md5 = file_http_headers.file_content_md5 + file_content_disposition = None + if file_http_headers is not None: + file_content_disposition = file_http_headers.file_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "properties" + + # Construct URL + url = self.set_http_headers.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_content_length is not None: + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') + if file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') + if file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') + if file_cache_control is not None: + header_parameters['x-ms-cache-control'] = 
self._serialize.header("file_cache_control", file_cache_control, 'str') + if file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') + if file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Updates user-defined metadata for the specified file. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. 
+ :type metadata: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, *, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or + negative one (-1) for a lease that never expires. A non-infinite lease + can be between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
+ :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "acquire" + + # Construct URL + url = self.acquire_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def release_lease(self, lease_id, timeout=None, request_id=None, *, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
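Per the acquire_lease docstring above, duration is either -1 (never expires) or 15-60 seconds, and the proposed id must be a valid GUID string. A sketch under those constraints (`file_ops` is an assumed instance of this class):

    import uuid

    async def lock_file(file_ops):
        lease_id = str(uuid.uuid4())
        # -1 requests a lease that never expires; a malformed proposed id
        # is rejected with 400. Success is 201 Created.
        await file_ops.acquire_lease(duration=-1, proposed_lease_id=lease_id)
        return lease_id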
+ :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "release" + + # Construct URL + url = self.release_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, request_id=None, *, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
+ :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "change" + + # Construct URL + url = self.change_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def break_lease(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "lease" + action = "break" + + # Construct URL + url = self.break_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Upload a range of bytes to a file. + + :param range: Specifies the range of bytes to be written. Both the + start and end of the range must be specified. For an update operation, + the range can be up to 4 MB in size. For a clear operation, the range + can be up to the value of the file's full size. The File service + accepts only a single byte range for the Range and 'x-ms-range' + headers, and the byte range must be specified in the following format: + bytes=startByte-endByte. 
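Rounding out the lease verbs, release_lease takes the current id positionally, while break_lease needs no id at all and succeeds with 202. A sketch (`file_ops` assumed as before):

    async def unlock_file(file_ops, lease_id):
        # Frees the lock immediately for the holder of lease_id.
        await file_ops.release_lease(lease_id)

    async def force_unlock(file_ops):
        # Breaks whatever lease is currently held without knowing its id.
        await file_ops.break_lease()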
+ :type range: str + :param file_range_write: Specify one of the following options: - + Update: Writes the bytes specified by the request body into the + specified range. The Range and Content-Length headers must match to + perform the update. - Clear: Clears the specified range and releases + the space used in storage for that range. To clear a range, set the + Content-Length header to zero, and set the Range header to a value + that indicates the range to clear, up to maximum file size. Possible + values include: 'update', 'clear' + :type file_range_write: str or + ~azure.storage.fileshare.models.FileRangeWriteType + :param content_length: Specifies the number of bytes being transmitted + in the request body. When the x-ms-write header is set to clear, the + value of this header must be set to zero. + :type content_length: long + :param optionalbody: Initial data. + :type optionalbody: Generator + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param content_md5: An MD5 hash of the content. This hash is used to + verify the integrity of the data during transport. When the + Content-MD5 header is specified, the File service compares the hash of + the content that has arrived with the header value that was sent. If + the two hashes do not match, the operation will fail with error code + 400 (Bad Request). + :type content_md5: bytearray + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "range" + + # Construct URL + url = self.upload_range.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'FileRangeWriteType') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def upload_range_from_url(self, range, copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Upload a range of bytes to a file where the contents are read from a + URL. + + :param range: Writes data to the specified byte range in the file. + :type range: str + :param copy_source: Specifies the URL of the source file or blob, up + to 2 KB in length. To copy a file to another file within the same + storage account, you may use Shared Key to authenticate the source + file. If you are copying a file from another storage account, or if + you are copying a blob from the same storage account or another + storage account, then you must authenticate the source file or blob + using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a + share snapshot can also be specified as a copy source. + :type copy_source: str + :param content_length: Specifies the number of bytes being transmitted + in the request body. When the x-ms-write header is set to clear, the + value of this header must be set to zero. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_crc64: Specify the crc64 calculated for the + range of bytes that must be read from the copy source. 
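A sketch of the update/clear modes documented for upload_range. The body is forwarded to the transport as stream_content, so a bytes-yielding iterator is assumed to be acceptable here; for clear, Content-Length must be zero and no body is sent:

    async def write_then_clear(file_ops, data):
        rng = "bytes=0-%d" % (len(data) - 1)
        # Update: Content-Length must match the size of the written range.
        await file_ops.upload_range(rng, len(data), "update",
                                    optionalbody=iter([data]))
        # Clear: zero Content-Length releases the storage backing the range.
        await file_ops.upload_range(rng, 0, "clear")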
+ :type source_content_crc64: bytearray + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.fileshare.models.SourceModifiedAccessConditions + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + source_if_match_crc64 = None + if source_modified_access_conditions is not None: + source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 + source_if_none_match_crc64 = None + if source_modified_access_conditions is not None: + source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "range" + + # Construct URL + url = self.upload_range_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if source_content_crc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if source_if_match_crc64 is not None: + header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray') + if source_if_none_match_crc64 is not None: + header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def get_range_list(self, sharesnapshot=None, prevsharesnapshot=None, timeout=None, range=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Returns the list of valid ranges for a file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param prevsharesnapshot: The previous snapshot parameter is an opaque + DateTime value that, when present, specifies the previous snapshot. + :type prevsharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param range: Specifies the range of bytes over which to list ranges, + inclusively. + :type range: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: ShareFileRangeList or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareFileRangeList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "rangelist" + + # Construct URL + url = self.get_range_list.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if prevsharesnapshot is not None: + query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ShareFileRangeList', response) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def start_copy(self, copy_source, timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, copy_file_smb_info=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Copies a blob or file to a destination file within the storage account. + + :param copy_source: Specifies the URL of the source file or blob, up + to 2 KB in length. To copy a file to another file within the same + storage account, you may use Shared Key to authenticate the source + file. If you are copying a file from another storage account, or if + you are copying a blob from the same storage account or another + storage account, then you must authenticate the source file or blob + using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a + share snapshot can also be specified as a copy source. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
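get_range_list is one of the few operations in this group that deserializes a response body (ShareFileRangeList). The attribute names on the model are assumptions in this sketch:

    async def show_valid_ranges(file_ops):
        range_list = await file_ops.get_range_list(range="bytes=0-4194303")
        # `ranges` holding items with `start`/`end` is the assumed model shape.
        for r in range_list.ranges or []:
            print(r.start, r.end)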
+ :type file_permission_key: str + :param copy_file_smb_info: Additional parameters for the operation + :type copy_file_smb_info: + ~azure.storage.fileshare.models.CopyFileSmbInfo + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_permission_copy_mode = None + if copy_file_smb_info is not None: + file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode + ignore_read_only = None + if copy_file_smb_info is not None: + ignore_read_only = copy_file_smb_info.ignore_read_only + file_attributes = None + if copy_file_smb_info is not None: + file_attributes = copy_file_smb_info.file_attributes + file_creation_time = None + if copy_file_smb_info is not None: + file_creation_time = copy_file_smb_info.file_creation_time + file_last_write_time = None + if copy_file_smb_info is not None: + file_last_write_time = copy_file_smb_info.file_last_write_time + set_archive_attribute = None + if copy_file_smb_info is not None: + set_archive_attribute = copy_file_smb_info.set_archive_attribute + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.start_copy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + if file_permission_copy_mode is not None: + header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", file_permission_copy_mode, 'PermissionCopyModeType') + if ignore_read_only is not None: + header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') + if file_attributes is not None: + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + if file_creation_time is not None: + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + if file_last_write_time is not None: + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if set_archive_attribute is not None: + header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", 
set_archive_attribute, 'bool') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def abort_copy(self, copy_id, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Aborts a pending Copy File operation, and leaves a destination file + with zero length and full metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id + header of the original Copy File operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "copy" + + # Construct URL + url = self.abort_copy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, *, cls=None, **kwargs): + """Lists handles for file. + + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. 
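start_copy and abort_copy return None unless a `cls` callback is supplied, so capturing x-ms-copy-id for a later abort looks like this (sketch; the comparison assumes the deserialized copy status behaves like the string "pending"):

    async def copy_with_bailout(file_ops, source_url):
        # cls is invoked as cls(response, deserialized_body, response_headers).
        headers = await file_ops.start_copy(
            source_url, cls=lambda resp, body, hdrs: hdrs)
        if headers["x-ms-copy-status"] == "pending":
            # Aborting leaves a zero-length destination with full metadata.
            await file_ops.abort_copy(headers["x-ms-copy-id"])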
+ :type sharesnapshot: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListHandlesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "listhandles" + + # Construct URL + url = self.list_handles.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListHandlesResponse', response) + header_dict = { + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, *, cls=None, **kwargs): + """Closes all handles open for given file. + + :param handle_id: Specifies handle ID opened on the file or directory + to be closed. Asterisk (‘*’) is a wildcard that specifies all handles. + :type handle_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. 
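The marker-based paging described above also drives force_close_handles: continuation is reported in the x-ms-marker response header. A sketch that closes every handle via the '*' wildcard (`file_ops` assumed as before):

    async def close_all_handles(file_ops):
        marker = None
        while True:
            hdrs = await file_ops.force_close_handles(
                "*", marker=marker, cls=lambda resp, body, h: h)
            # An absent or empty x-ms-marker means enumeration is complete.
            marker = hdrs.get("x-ms-marker")
            if not marker:
                break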
+ :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "forceclosehandles" + + # Construct URL + url = self.force_close_handles.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), + 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), + 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py new file mode 100644 index 0000000..c4e40f1 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py @@ -0,0 +1,253 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class ServiceOperations: + """ServiceOperations async operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar restype: . Constant value: "service". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.restype = "service" + + async def set_properties(self, storage_service_properties, timeout=None, *, cls=None, **kwargs): + """Sets properties for a storage account's File service endpoint, + including properties for Storage Analytics metrics and CORS + (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. + :type storage_service_properties: + ~azure.storage.fileshare.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.set_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct body + body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_properties.metadata = {'url': '/'} + + async def get_properties(self, timeout=None, *, cls=None, **kwargs): + 
"""Gets the properties of a storage account's File service, including + properties for Storage Analytics metrics and CORS (Cross-Origin + Resource Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: StorageServiceProperties or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.StorageServiceProperties + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('StorageServiceProperties', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_properties.metadata = {'url': '/'} + + async def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, *, cls=None, **kwargs): + """The List Shares Segment operation returns a list of the shares and + share snapshots under the specified account. + + :param prefix: Filters the results to return only entries whose name + begins with the specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets + to include in the response. 
+ :type include: list[str or + ~azure.storage.fileshare.models.ListSharesIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListSharesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListSharesResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "list" + + # Construct URL + url = self.list_shares_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListSharesResponse', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_shares_segment.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_share_operations_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_share_operations_async.py new file mode 100644 index 0000000..915f757 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_share_operations_async.py @@ -0,0 +1,1315 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class ShareOperations: + """ShareOperations async operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar restype: . Constant value: "share". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.restype = "share" + + async def create(self, timeout=None, metadata=None, quota=None, *, cls=None, **kwargs): + """Creates a new share under the specified account. If the share with the + same name already exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param quota: Specifies the maximum size of the share, in gigabytes. + :type quota: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if quota is not None: + header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': 
self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{shareName}'} + + async def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Returns all user-defined metadata and system properties for the + specified share or share snapshot. The data returned does not include + the share's list of files. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')), + 'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')), + 'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')), + 'x-ms-share-provisioned-egress-mbps': 
self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')),
+                'x-ms-share-next-allowed-quota-downgrade-time': self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')),
+                'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+                'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+                'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    get_properties.metadata = {'url': '/{shareName}'}
+
+    async def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, lease_access_conditions=None, *, cls=None, **kwargs):
+        """This operation marks the specified share or share snapshot for
+        deletion. The share or share snapshot and any files contained within it
+        are later deleted during garbage collection.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime
+         value that, when present, specifies the share snapshot to query.
+        :type sharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting Timeouts for File Service Operations.
+        :type timeout: int
+        :param delete_snapshots: Specifies the 'include' option to delete the
+         base share and all of its snapshots. Possible values include:
+         'include'
+        :type delete_snapshots: str or
+         ~azure.storage.fileshare.models.DeleteSnapshotsOptionType
+        :param lease_access_conditions: Additional parameters for the
+         operation
+        :type lease_access_conditions:
+         ~azure.storage.fileshare.models.LeaseAccessConditions
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        lease_id = None
+        if lease_access_conditions is not None:
+            lease_id = lease_access_conditions.lease_id
+
+        # Construct URL
+        url = self.delete.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if sharesnapshot is not None:
+            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if delete_snapshots is not None:
+            header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
+        if lease_id is not None:
+            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+        # Construct and send request
+        request = self._client.delete(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
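+            # A share delete only succeeds with 202 (Accepted); any other
+            # status code is mapped and raised as StorageErrorException below.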
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{shareName}'} + + async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, sharesnapshot=None, request_id=None, *, cls=None, **kwargs): + """The Lease Share operation establishes and manages a lock on a share, or + the specified snapshot for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or + negative one (-1) for a lease that never expires. A non-infinite lease + can be between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "acquire" + + # Construct URL + url = self.acquire_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + acquire_lease.metadata = {'url': '/{shareName}'} + + async def release_lease(self, lease_id, timeout=None, sharesnapshot=None, request_id=None, *, cls=None, **kwargs): + """The Lease Share operation establishes and manages a lock on a share, or + the specified snapshot for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "release" + + # Construct URL + url = self.release_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + release_lease.metadata = {'url': '/{shareName}'} + + async def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, sharesnapshot=None, request_id=None, *, cls=None, **kwargs): + """The Lease Share operation establishes and manages a lock on a share, or + the specified snapshot for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "change" + + # Construct URL + url = self.change_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + change_lease.metadata = {'url': '/{shareName}'} + + async def 
renew_lease(self, lease_id, timeout=None, sharesnapshot=None, request_id=None, *, cls=None, **kwargs): + """The Lease Share operation establishes and manages a lock on a share, or + the specified snapshot for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "renew" + + # Construct URL + url = self.renew_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + renew_lease.metadata = {'url': 
'/{shareName}'} + + async def break_lease(self, timeout=None, break_period=None, request_id=None, sharesnapshot=None, lease_access_conditions=None, *, cls=None, **kwargs): + """The Lease Share operation establishes and manages a lock on a share, or + the specified snapshot for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param break_period: For a break operation, proposed duration the + lease should continue before it is broken, in seconds, between 0 and + 60. This break period is only used if it is shorter than the time + remaining on the lease. If longer, the time remaining on the lease is + used. A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break period. + If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, + and an infinite lease breaks immediately. + :type break_period: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "lease" + action = "break" + + # Construct URL + url = self.break_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + break_lease.metadata = {'url': '/{shareName}'} + + async def create_snapshot(self, timeout=None, metadata=None, *, cls=None, **kwargs): + """Creates a read-only snapshot of a share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "snapshot" + + # Construct URL + url = self.create_snapshot.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create_snapshot.metadata = {'url': '/{shareName}'} + + async def create_permission(self, share_permission, timeout=None, *, cls=None, **kwargs): + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the + share level. + :type share_permission: + ~azure.storage.fileshare.models.SharePermission + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "filepermission" + + # Construct URL + url = self.create_permission.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct body + body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False) + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create_permission.metadata = {'url': '/{shareName}'} + + async def get_permission(self, file_permission_key, timeout=None, *, cls=None, **kwargs): + """Returns the permission (security descriptor) for a given key. + + :param file_permission_key: Key of the permission to be set for the + directory/file. + :type file_permission_key: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: SharePermission or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.SharePermission + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "filepermission" + + # Construct URL + url = self.get_permission.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SharePermission', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_permission.metadata = {'url': '/{shareName}'} + + async def set_quota(self, timeout=None, quota=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Sets quota for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param quota: Specifies the maximum size of the share, in gigabytes. 
+ :type quota: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "properties" + + # Construct URL + url = self.set_quota.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if quota is not None: + header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_quota.metadata = {'url': '/{shareName}'} + + async def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Sets one or more user-defined name-value pairs for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. 
+ :type metadata: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}'} + + async def get_access_policy(self, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Returns information about stored access policies specified on the + share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: list or the result of cls(response) + :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "acl" + + # Construct URL + url = self.get_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[SignedIdentifier]', response) + header_dict = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_access_policy.metadata = {'url': '/{shareName}'} + + async def set_access_policy(self, share_acl=None, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Sets a stored access policy for use with shared access signatures. + + :param share_acl: The ACL for the share. + :type share_acl: + list[~azure.storage.fileshare.models.SignedIdentifier] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "acl" + + # Construct URL + url = self.set_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct body + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} + if share_acl is not None: + body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) + else: + body_content = None + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_access_policy.metadata = {'url': '/{shareName}'} + + async def get_statistics(self, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): + """Retrieves statistics related to the share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+        :type timeout: int
+        :param lease_access_conditions: Additional parameters for the
+         operation
+        :type lease_access_conditions:
+         ~azure.storage.fileshare.models.LeaseAccessConditions
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: ShareStats or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ShareStats
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        lease_id = None
+        if lease_access_conditions is not None:
+            lease_id = lease_access_conditions.lease_id
+
+        comp = "stats"
+
+        # Construct URL
+        url = self.get_statistics.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/xml'
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if lease_id is not None:
+            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('ShareStats', response)
+            header_dict = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    get_statistics.metadata = {'url': '/{shareName}'}
+
+    async def restore(self, timeout=None, request_id=None, deleted_share_name=None, deleted_share_version=None, *, cls=None, **kwargs):
+        """Restores a previously deleted Share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting Timeouts for File Service Operations.
+        :type timeout: int
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
+        :type request_id: str
+        :param deleted_share_name: Specifies the name of the
+         previously-deleted share.
+        :type deleted_share_name: str
+        :param deleted_share_version: Specifies the version of the
+         previously-deleted share.
+ :type deleted_share_version: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "undelete" + + # Construct URL + url = self.restore.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if deleted_share_name is not None: + header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') + if deleted_share_version is not None: + header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + restore.metadata = {'url': '/{shareName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/__init__.py new file mode 100644 index 0000000..163c6b6 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/__init__.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
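+#
+# This package __init__ re-exports the generated file share models and enums
+# under one namespace: the type-annotated Python 3 models are imported first,
+# and the untyped Python 2 variants serve as a fallback (see the try/except
+# import below).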
+# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AccessPolicy + from ._models_py3 import ClearRange + from ._models_py3 import CopyFileSmbInfo + from ._models_py3 import CorsRule + from ._models_py3 import DirectoryItem + from ._models_py3 import FileHTTPHeaders + from ._models_py3 import FileItem + from ._models_py3 import FileProperty + from ._models_py3 import FileRange + from ._models_py3 import FilesAndDirectoriesListSegment + from ._models_py3 import HandleItem + from ._models_py3 import LeaseAccessConditions + from ._models_py3 import ListFilesAndDirectoriesSegmentResponse + from ._models_py3 import ListHandlesResponse + from ._models_py3 import ListSharesResponse + from ._models_py3 import Metrics + from ._models_py3 import RetentionPolicy + from ._models_py3 import ShareFileRangeList + from ._models_py3 import ShareItem + from ._models_py3 import SharePermission + from ._models_py3 import ShareProperties + from ._models_py3 import ShareProtocolSettings + from ._models_py3 import ShareSmbSettings + from ._models_py3 import ShareStats + from ._models_py3 import SignedIdentifier + from ._models_py3 import SmbMultichannel + from ._models_py3 import SourceModifiedAccessConditions + from ._models_py3 import StorageError, StorageErrorException + from ._models_py3 import StorageServiceProperties +except (SyntaxError, ImportError): + from ._models import AccessPolicy + from ._models import ClearRange + from ._models import CopyFileSmbInfo + from ._models import CorsRule + from ._models import DirectoryItem + from ._models import FileHTTPHeaders + from ._models import FileItem + from ._models import FileProperty + from ._models import FileRange + from ._models import FilesAndDirectoriesListSegment + from ._models import HandleItem + from ._models import LeaseAccessConditions + from ._models import ListFilesAndDirectoriesSegmentResponse + from ._models import ListHandlesResponse + from ._models import ListSharesResponse + from ._models import Metrics + from ._models import RetentionPolicy + from ._models import ShareFileRangeList + from ._models import ShareItem + from ._models import SharePermission + from ._models import ShareProperties + from ._models import ShareProtocolSettings + from ._models import ShareSmbSettings + from ._models import ShareStats + from ._models import SignedIdentifier + from ._models import SmbMultichannel + from ._models import SourceModifiedAccessConditions + from ._models import StorageError, StorageErrorException + from ._models import StorageServiceProperties +from ._azure_file_storage_enums import ( + CopyStatusType, + DeleteSnapshotsOptionType, + FileRangeWriteType, + LeaseDurationType, + LeaseStateType, + LeaseStatusType, + ListSharesIncludeType, + PermissionCopyModeType, + StorageErrorCode, +) + +__all__ = [ + 'AccessPolicy', + 'ClearRange', + 'CopyFileSmbInfo', + 'CorsRule', + 'DirectoryItem', + 'FileHTTPHeaders', + 'FileItem', + 'FileProperty', + 'FileRange', + 'FilesAndDirectoriesListSegment', + 'HandleItem', + 'LeaseAccessConditions', + 'ListFilesAndDirectoriesSegmentResponse', + 'ListHandlesResponse', + 'ListSharesResponse', + 'Metrics', + 'RetentionPolicy', + 'ShareFileRangeList', + 'ShareItem', + 'SharePermission', + 'ShareProperties', + 'ShareProtocolSettings', + 'ShareSmbSettings', + 'ShareStats', + 'SignedIdentifier', + 'SmbMultichannel', + 'SourceModifiedAccessConditions', + 'StorageError', 'StorageErrorException', + 'StorageServiceProperties', + 'StorageErrorCode', + 
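+    # The names from 'StorageErrorCode' above through 'FileRangeWriteType'
+    # below are enum re-exports from ._azure_file_storage_enums.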
'LeaseDurationType', + 'LeaseStateType', + 'LeaseStatusType', + 'PermissionCopyModeType', + 'DeleteSnapshotsOptionType', + 'ListSharesIncludeType', + 'CopyStatusType', + 'FileRangeWriteType', +] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_azure_file_storage_enums.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_azure_file_storage_enums.py new file mode 100644 index 0000000..3a1fab6 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_azure_file_storage_enums.py @@ -0,0 +1,135 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum + + +class StorageErrorCode(str, Enum): + + account_already_exists = "AccountAlreadyExists" + account_being_created = "AccountBeingCreated" + account_is_disabled = "AccountIsDisabled" + authentication_failed = "AuthenticationFailed" + authorization_failure = "AuthorizationFailure" + condition_headers_not_supported = "ConditionHeadersNotSupported" + condition_not_met = "ConditionNotMet" + empty_metadata_key = "EmptyMetadataKey" + insufficient_account_permissions = "InsufficientAccountPermissions" + internal_error = "InternalError" + invalid_authentication_info = "InvalidAuthenticationInfo" + invalid_header_value = "InvalidHeaderValue" + invalid_http_verb = "InvalidHttpVerb" + invalid_input = "InvalidInput" + invalid_md5 = "InvalidMd5" + invalid_metadata = "InvalidMetadata" + invalid_query_parameter_value = "InvalidQueryParameterValue" + invalid_range = "InvalidRange" + invalid_resource_name = "InvalidResourceName" + invalid_uri = "InvalidUri" + invalid_xml_document = "InvalidXmlDocument" + invalid_xml_node_value = "InvalidXmlNodeValue" + md5_mismatch = "Md5Mismatch" + metadata_too_large = "MetadataTooLarge" + missing_content_length_header = "MissingContentLengthHeader" + missing_required_query_parameter = "MissingRequiredQueryParameter" + missing_required_header = "MissingRequiredHeader" + missing_required_xml_node = "MissingRequiredXmlNode" + multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" + operation_timed_out = "OperationTimedOut" + out_of_range_input = "OutOfRangeInput" + out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" + request_body_too_large = "RequestBodyTooLarge" + resource_type_mismatch = "ResourceTypeMismatch" + request_url_failed_to_parse = "RequestUrlFailedToParse" + resource_already_exists = "ResourceAlreadyExists" + resource_not_found = "ResourceNotFound" + server_busy = "ServerBusy" + unsupported_header = "UnsupportedHeader" + unsupported_xml_node = "UnsupportedXmlNode" + unsupported_query_parameter = "UnsupportedQueryParameter" + unsupported_http_verb = "UnsupportedHttpVerb" + cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" + client_cache_flush_delay = "ClientCacheFlushDelay" + delete_pending = "DeletePending" + directory_not_empty = "DirectoryNotEmpty" + file_lock_conflict = "FileLockConflict" + invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" + parent_not_found = "ParentNotFound" + read_only_attribute = 
"ReadOnlyAttribute" + share_already_exists = "ShareAlreadyExists" + share_being_deleted = "ShareBeingDeleted" + share_disabled = "ShareDisabled" + share_not_found = "ShareNotFound" + sharing_violation = "SharingViolation" + share_snapshot_in_progress = "ShareSnapshotInProgress" + share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" + share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" + share_has_snapshots = "ShareHasSnapshots" + container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch" + authorization_protocol_mismatch = "AuthorizationProtocolMismatch" + authorization_permission_mismatch = "AuthorizationPermissionMismatch" + authorization_service_mismatch = "AuthorizationServiceMismatch" + authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch" + feature_version_mismatch = "FeatureVersionMismatch" + + +class LeaseDurationType(str, Enum): + + infinite = "infinite" + fixed = "fixed" + + +class LeaseStateType(str, Enum): + + available = "available" + leased = "leased" + expired = "expired" + breaking = "breaking" + broken = "broken" + + +class LeaseStatusType(str, Enum): + + locked = "locked" + unlocked = "unlocked" + + +class PermissionCopyModeType(str, Enum): + + source = "source" + override = "override" + + +class DeleteSnapshotsOptionType(str, Enum): + + include = "include" + + +class ListSharesIncludeType(str, Enum): + + snapshots = "snapshots" + metadata = "metadata" + deleted = "deleted" + + +class CopyStatusType(str, Enum): + + pending = "pending" + success = "success" + aborted = "aborted" + failed = "failed" + + +class FileRangeWriteType(str, Enum): + + update = "update" + clear = "clear" diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models.py new file mode 100644 index 0000000..9f6b2db --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models.py @@ -0,0 +1,1021 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from azure.core.exceptions import HttpResponseError + + +class AccessPolicy(Model): + """An Access policy. + + :param start: The date-time the policy is active. + :type start: str + :param expiry: The date-time the policy expires. + :type expiry: str + :param permission: The permissions for the ACL policy. + :type permission: str + """ + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, + 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, + 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(AccessPolicy, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.expiry = kwargs.get('expiry', None) + self.permission = kwargs.get('permission', None) + + +class ClearRange(Model): + """ClearRange. 
+ + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'ClearRange' + } + + def __init__(self, **kwargs): + super(ClearRange, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) + + +class CopyFileSmbInfo(Model): + """Additional parameters for start_copy operation. + + :param file_permission_copy_mode: Specifies the option to copy file + security descriptor from source file or to set it using the value which is + defined by the header value of x-ms-file-permission or + x-ms-file-permission-key. Possible values include: 'source', 'override' + :type file_permission_copy_mode: str or + ~azure.storage.fileshare.models.PermissionCopyModeType + :param ignore_read_only: Specifies the option to overwrite the target file + if it already exists and has read-only attribute set. + :type ignore_read_only: bool + :param file_attributes: Specifies either the option to copy file + attributes from a source file(source) to a target file or a list of + attributes to set on a target file. + :type file_attributes: str + :param file_creation_time: Specifies either the option to copy file + creation time from a source file(source) to a target file or a time value + in ISO 8601 format to set as creation time on a target file. + :type file_creation_time: str + :param file_last_write_time: Specifies either the option to copy file last + write time from a source file(source) to a target file or a time value in + ISO 8601 format to set as last write time on a target file. + :type file_last_write_time: str + :param set_archive_attribute: Specifies the option to set archive + attribute on a target file. True means archive attribute will be set on a + target file despite attribute overrides or a source file state. + :type set_archive_attribute: bool + """ + + _attribute_map = { + 'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}}, + 'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}}, + 'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}}, + 'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}}, + 'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}}, + 'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(CopyFileSmbInfo, self).__init__(**kwargs) + self.file_permission_copy_mode = kwargs.get('file_permission_copy_mode', None) + self.ignore_read_only = kwargs.get('ignore_read_only', None) + self.file_attributes = kwargs.get('file_attributes', None) + self.file_creation_time = kwargs.get('file_creation_time', None) + self.file_last_write_time = kwargs.get('file_last_write_time', None) + self.set_archive_attribute = kwargs.get('set_archive_attribute', None) + + +class CorsRule(Model): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. 
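To make the start_copy options model above concrete, a minimal sketch constructing it; the values are illustrative and the import path follows this package's layout:

from azure.multiapi.storagev2.fileshare.v2020_02_10._generated.models import CopyFileSmbInfo

smb_info = CopyFileSmbInfo(
    file_permission_copy_mode='source',  # copy the security descriptor from the source file
    ignore_read_only=True,               # allow overwriting a read-only target
    file_attributes='source',            # copy attributes rather than setting an explicit list
    set_archive_attribute=False,
)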
Web browsers implement a
+ security restriction known as same-origin policy that prevents a web page
+ from calling APIs in a different domain; CORS provides a secure way to
+ allow one domain (the origin domain) to call APIs in another domain.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param allowed_origins: Required. The origin domains that are permitted to
+ make a request against the storage service via CORS. The origin domain is
+ the domain from which the request originates. Note that the origin must be
+ an exact case-sensitive match with the origin that the user agent sends to
+ the service. You can also use the wildcard character '*' to allow all
+ origin domains to make requests via CORS.
+ :type allowed_origins: str
+ :param allowed_methods: Required. The methods (HTTP request verbs) that
+ the origin domain may use for a CORS request. (comma separated)
+ :type allowed_methods: str
+ :param allowed_headers: Required. The request headers that the origin
+ domain may specify on the CORS request.
+ :type allowed_headers: str
+ :param exposed_headers: Required. The response headers that may be sent in
+ the response to the CORS request and exposed by the browser to the request
+ issuer.
+ :type exposed_headers: str
+ :param max_age_in_seconds: Required. The maximum amount of time that a
+ browser should cache the preflight OPTIONS request.
+ :type max_age_in_seconds: int
+ """
+
+ _validation = {
+ 'allowed_origins': {'required': True},
+ 'allowed_methods': {'required': True},
+ 'allowed_headers': {'required': True},
+ 'exposed_headers': {'required': True},
+ 'max_age_in_seconds': {'required': True, 'minimum': 0},
+ }
+
+ _attribute_map = {
+ 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
+ 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
+ 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
+ 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
+ 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(CorsRule, self).__init__(**kwargs)
+ self.allowed_origins = kwargs.get('allowed_origins', None)
+ self.allowed_methods = kwargs.get('allowed_methods', None)
+ self.allowed_headers = kwargs.get('allowed_headers', None)
+ self.exposed_headers = kwargs.get('exposed_headers', None)
+ self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
+
+
+class DirectoryItem(Model):
+ """A listed directory item.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ }
+ _xml_map = {
+ 'name': 'Directory'
+ }
+
+ def __init__(self, **kwargs):
+ super(DirectoryItem, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+
+
+class FileHTTPHeaders(Model):
+ """Additional parameters for a set of operations, such as: File_create,
+ File_set_http_headers.
+
+ :param file_content_type: Sets the MIME content type of the file. The
+ default type is 'application/octet-stream'.
+ :type file_content_type: str
+ :param file_content_encoding: Specifies which content encodings have been
+ applied to the file.
+ :type file_content_encoding: str + :param file_content_language: Specifies the natural languages used by this + resource. + :type file_content_language: str + :param file_cache_control: Sets the file's cache control. The File service + stores this value but does not use or modify it. + :type file_cache_control: str + :param file_content_md5: Sets the file's MD5 hash. + :type file_content_md5: bytearray + :param file_content_disposition: Sets the file's Content-Disposition + header. + :type file_content_disposition: str + """ + + _attribute_map = { + 'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}}, + 'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}}, + 'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}}, + 'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}}, + 'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}}, + 'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(FileHTTPHeaders, self).__init__(**kwargs) + self.file_content_type = kwargs.get('file_content_type', None) + self.file_content_encoding = kwargs.get('file_content_encoding', None) + self.file_content_language = kwargs.get('file_content_language', None) + self.file_cache_control = kwargs.get('file_cache_control', None) + self.file_content_md5 = kwargs.get('file_content_md5', None) + self.file_content_disposition = kwargs.get('file_content_disposition', None) + + +class FileItem(Model): + """A listed file item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param properties: Required. + :type properties: ~azure.storage.fileshare.models.FileProperty + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}}, + } + _xml_map = { + 'name': 'File' + } + + def __init__(self, **kwargs): + super(FileItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.properties = kwargs.get('properties', None) + + +class FileProperty(Model): + """File properties. + + All required parameters must be populated in order to send to Azure. + + :param content_length: Required. Content length of the file. This value + may not be up-to-date since an SMB client may have modified the file + locally. The value of Content-Length may not reflect that fact until the + handle is closed or the op-lock is broken. To retrieve current property + values, call Get File Properties. + :type content_length: long + """ + + _validation = { + 'content_length': {'required': True}, + } + + _attribute_map = { + 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(FileProperty, self).__init__(**kwargs) + self.content_length = kwargs.get('content_length', None) + + +class FileRange(Model): + """An Azure Storage file range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. Start of the range. + :type start: long + :param end: Required. End of the range. 
+ :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'Range' + } + + def __init__(self, **kwargs): + super(FileRange, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) + + +class FilesAndDirectoriesListSegment(Model): + """Abstract for entries that can be listed from Directory. + + All required parameters must be populated in order to send to Azure. + + :param directory_items: Required. + :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] + :param file_items: Required. + :type file_items: list[~azure.storage.fileshare.models.FileItem] + """ + + _validation = { + 'directory_items': {'required': True}, + 'file_items': {'required': True}, + } + + _attribute_map = { + 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}}, + 'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}}, + } + _xml_map = { + 'name': 'Entries' + } + + def __init__(self, **kwargs): + super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) + self.directory_items = kwargs.get('directory_items', None) + self.file_items = kwargs.get('file_items', None) + + +class HandleItem(Model): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :param handle_id: Required. XSMB service handle ID + :type handle_id: str + :param path: Required. File or directory name including full path starting + from share root + :type path: str + :param file_id: Required. FileId uniquely identifies the file or + directory. + :type file_id: str + :param parent_id: ParentId uniquely identifies the parent directory of the + object. + :type parent_id: str + :param session_id: Required. SMB session ID in context of which the file + handle was opened + :type session_id: str + :param client_ip: Required. Client IP that opened the handle + :type client_ip: str + :param open_time: Required. Time when the session that previously opened + the handle has last been reconnected. 
(UTC) + :type open_time: datetime + :param last_reconnect_time: Time handle was last connected to (UTC) + :type last_reconnect_time: datetime + """ + + _validation = { + 'handle_id': {'required': True}, + 'path': {'required': True}, + 'file_id': {'required': True}, + 'session_id': {'required': True}, + 'client_ip': {'required': True}, + 'open_time': {'required': True}, + } + + _attribute_map = { + 'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}}, + 'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}}, + 'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}}, + 'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}}, + 'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}}, + 'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}}, + 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}}, + 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}}, + } + _xml_map = { + 'name': 'Handle' + } + + def __init__(self, **kwargs): + super(HandleItem, self).__init__(**kwargs) + self.handle_id = kwargs.get('handle_id', None) + self.path = kwargs.get('path', None) + self.file_id = kwargs.get('file_id', None) + self.parent_id = kwargs.get('parent_id', None) + self.session_id = kwargs.get('session_id', None) + self.client_ip = kwargs.get('client_ip', None) + self.open_time = kwargs.get('open_time', None) + self.last_reconnect_time = kwargs.get('last_reconnect_time', None) + + +class LeaseAccessConditions(Model): + """Additional parameters for a set of operations. + + :param lease_id: If specified, the operation only succeeds if the + resource's lease is active and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = kwargs.get('lease_id', None) + + +class ListFilesAndDirectoriesSegmentResponse(Model): + """An enumeration of directories and files. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param share_name: Required. + :type share_name: str + :param share_snapshot: + :type share_snapshot: str + :param directory_path: Required. + :type directory_path: str + :param prefix: Required. + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. + :type segment: + ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment + :param next_marker: Required. 
+ :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'share_name': {'required': True}, + 'directory_path': {'required': True}, + 'prefix': {'required': True}, + 'segment': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}}, + 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}}, + 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs.get('service_endpoint', None) + self.share_name = kwargs.get('share_name', None) + self.share_snapshot = kwargs.get('share_snapshot', None) + self.directory_path = kwargs.get('directory_path', None) + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.segment = kwargs.get('segment', None) + self.next_marker = kwargs.get('next_marker', None) + + +class ListHandlesResponse(Model): + """An enumeration of handles. + + All required parameters must be populated in order to send to Azure. + + :param handle_list: + :type handle_list: list[~azure.storage.fileshare.models.HandleItem] + :param next_marker: Required. + :type next_marker: str + """ + + _validation = { + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(ListHandlesResponse, self).__init__(**kwargs) + self.handle_list = kwargs.get('handle_list', None) + self.next_marker = kwargs.get('next_marker', None) + + +class ListSharesResponse(Model): + """An enumeration of shares. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param share_items: + :type share_items: list[~azure.storage.fileshare.models.ShareItem] + :param next_marker: Required. 
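The next_marker field is what drives continuation across these enumeration responses. A sketch of the usual loop, with list_shares_segment standing in for whatever callable produces one ListSharesResponse page at a time (a hypothetical helper, not part of this package):

def iter_all_shares(list_shares_segment):
    marker = None
    while True:
        page = list_shares_segment(marker=marker)  # returns a ListSharesResponse
        for share in page.share_items or []:
            yield share
        if not page.next_marker:  # an empty NextMarker means the listing is complete
            return
        marker = page.next_marker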
+ :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(ListSharesResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs.get('service_endpoint', None) + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.share_items = kwargs.get('share_items', None) + self.next_marker = kwargs.get('next_marker', None) + + +class Metrics(Model): + """Storage Analytics metrics for file service. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the + File service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary + statistics for called API operations. + :type include_apis: bool + :param retention_policy: + :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(Metrics, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.enabled = kwargs.get('enabled', None) + self.include_apis = kwargs.get('include_apis', None) + self.retention_policy = kwargs.get('retention_policy', None) + + +class RetentionPolicy(Model): + """The retention policy. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether a retention policy is enabled + for the File service. If false, metrics data is retained, and the user is + responsible for deleting it. + :type enabled: bool + :param days: Indicates the number of days that metrics data should be + retained. All data older than this value will be deleted. Metrics data is + deleted on a best-effort basis after the retention period expires. 
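A minimal sketch wiring Metrics to a RetentionPolicy as they would appear in service properties; the values are illustrative:

from azure.multiapi.storagev2.fileshare.v2020_02_10._generated.models import Metrics, RetentionPolicy

hour_metrics = Metrics(
    version='1.0',
    enabled=True,
    include_apis=True,  # include per-API summary statistics
    retention_policy=RetentionPolicy(enabled=True, days=7),  # keep metrics data for a week
)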
+ :type days: int + """ + + _validation = { + 'enabled': {'required': True}, + 'days': {'maximum': 365, 'minimum': 1}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(RetentionPolicy, self).__init__(**kwargs) + self.enabled = kwargs.get('enabled', None) + self.days = kwargs.get('days', None) + + +class ShareFileRangeList(Model): + """The list of file ranges. + + :param ranges: + :type ranges: list[~azure.storage.fileshare.models.FileRange] + :param clear_ranges: + :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] + """ + + _attribute_map = { + 'ranges': {'key': 'Ranges', 'type': '[FileRange]', 'xml': {'name': 'Ranges', 'itemsName': 'Range'}}, + 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]', 'xml': {'name': 'ClearRanges', 'itemsName': 'ClearRange'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(ShareFileRangeList, self).__init__(**kwargs) + self.ranges = kwargs.get('ranges', None) + self.clear_ranges = kwargs.get('clear_ranges', None) + + +class ShareItem(Model): + """A listed Azure Storage share item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param snapshot: + :type snapshot: str + :param deleted: + :type deleted: bool + :param version: + :type version: str + :param properties: Required. + :type properties: ~azure.storage.fileshare.models.ShareProperties + :param metadata: + :type metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, + 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}}, + 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, + } + _xml_map = { + 'name': 'Share' + } + + def __init__(self, **kwargs): + super(ShareItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.snapshot = kwargs.get('snapshot', None) + self.deleted = kwargs.get('deleted', None) + self.version = kwargs.get('version', None) + self.properties = kwargs.get('properties', None) + self.metadata = kwargs.get('metadata', None) + + +class SharePermission(Model): + """A permission (a security descriptor) at the share level. + + All required parameters must be populated in order to send to Azure. + + :param permission: Required. The permission in the Security Descriptor + Definition Language (SDDL). + :type permission: str + """ + + _validation = { + 'permission': {'required': True}, + } + + _attribute_map = { + 'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(SharePermission, self).__init__(**kwargs) + self.permission = kwargs.get('permission', None) + + +class ShareProperties(Model): + """Properties of a share. + + All required parameters must be populated in order to send to Azure. + + :param last_modified: Required. + :type last_modified: datetime + :param etag: Required. + :type etag: str + :param quota: Required. 
+ :type quota: int + :param provisioned_iops: + :type provisioned_iops: int + :param provisioned_ingress_mbps: + :type provisioned_ingress_mbps: int + :param provisioned_egress_mbps: + :type provisioned_egress_mbps: int + :param next_allowed_quota_downgrade_time: + :type next_allowed_quota_downgrade_time: datetime + :param deleted_time: + :type deleted_time: datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param lease_status: Possible values include: 'locked', 'unlocked' + :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType + :param lease_state: Possible values include: 'available', 'leased', + 'expired', 'breaking', 'broken' + :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType + :param lease_duration: Possible values include: 'infinite', 'fixed' + :type lease_duration: str or + ~azure.storage.fileshare.models.LeaseDurationType + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + 'quota': {'required': True}, + } + + _attribute_map = { + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, + 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, + 'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}}, + 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}}, + 'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}}, + 'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}}, + 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, + 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(ShareProperties, self).__init__(**kwargs) + self.last_modified = kwargs.get('last_modified', None) + self.etag = kwargs.get('etag', None) + self.quota = kwargs.get('quota', None) + self.provisioned_iops = kwargs.get('provisioned_iops', None) + self.provisioned_ingress_mbps = kwargs.get('provisioned_ingress_mbps', None) + self.provisioned_egress_mbps = kwargs.get('provisioned_egress_mbps', None) + self.next_allowed_quota_downgrade_time = kwargs.get('next_allowed_quota_downgrade_time', None) + self.deleted_time = kwargs.get('deleted_time', None) + self.remaining_retention_days = kwargs.get('remaining_retention_days', None) + self.lease_status = kwargs.get('lease_status', None) + self.lease_state = kwargs.get('lease_state', None) + self.lease_duration = kwargs.get('lease_duration', None) + + +class ShareProtocolSettings(Model): + """Protocol settings. + + :param smb: Settings for SMB protocol. 
+ :type smb: ~azure.storage.fileshare.models.ShareSmbSettings + """ + + _attribute_map = { + 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings', 'xml': {'name': 'SMB'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(ShareProtocolSettings, self).__init__(**kwargs) + self.smb = kwargs.get('smb', None) + + +class ShareSmbSettings(Model): + """Settings for SMB protocol. + + :param multichannel: Settings for SMB Multichannel. + :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel + """ + + _attribute_map = { + 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel', 'xml': {'name': 'Multichannel'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(ShareSmbSettings, self).__init__(**kwargs) + self.multichannel = kwargs.get('multichannel', None) + + +class ShareStats(Model): + """Stats for the share. + + All required parameters must be populated in order to send to Azure. + + :param share_usage_bytes: Required. The approximate size of the data + stored in bytes. Note that this value may not include all recently created + or recently resized files. + :type share_usage_bytes: int + """ + + _validation = { + 'share_usage_bytes': {'required': True}, + } + + _attribute_map = { + 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(ShareStats, self).__init__(**kwargs) + self.share_usage_bytes = kwargs.get('share_usage_bytes', None) + + +class SignedIdentifier(Model): + """Signed identifier. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A unique id. + :type id: str + :param access_policy: The access policy. + :type access_policy: ~azure.storage.fileshare.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(SignedIdentifier, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.access_policy = kwargs.get('access_policy', None) + + +class SmbMultichannel(Model): + """Settings for SMB multichannel. + + :param enabled: If SMB multichannel is enabled. + :type enabled: bool + """ + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + } + _xml_map = { + 'name': 'Multichannel' + } + + def __init__(self, **kwargs): + super(SmbMultichannel, self).__init__(**kwargs) + self.enabled = kwargs.get('enabled', None) + + +class SourceModifiedAccessConditions(Model): + """Additional parameters for upload_range_from_url operation. + + :param source_if_match_crc64: Specify the crc64 value to operate only on + range with a matching crc64 checksum. + :type source_if_match_crc64: bytearray + :param source_if_none_match_crc64: Specify the crc64 value to operate only + on range without a matching crc64 checksum. 
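A minimal sketch of the conditional-copy model above; the 8-byte CRC64 value is a placeholder:

from azure.multiapi.storagev2.fileshare.v2020_02_10._generated.models import SourceModifiedAccessConditions

# Proceed only if the source range still carries the expected CRC64 checksum.
conditions = SourceModifiedAccessConditions(
    source_if_match_crc64=bytearray(b'\x12\x34\x56\x78\x9a\xbc\xde\xf0'),
)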
+ :type source_if_none_match_crc64: bytearray
+ """
+
+ _attribute_map = {
+ 'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}},
+ 'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+ self.source_if_match_crc64 = kwargs.get('source_if_match_crc64', None)
+ self.source_if_none_match_crc64 = kwargs.get('source_if_none_match_crc64', None)
+
+
+class StorageError(Model):
+ """StorageError.
+
+ :param message:
+ :type message: str
+ """
+
+ _attribute_map = {
+ 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(StorageError, self).__init__(**kwargs)
+ self.message = kwargs.get('message', None)
+
+
+class StorageErrorException(HttpResponseError):
+ """Server responded with exception of type: 'StorageError'.
+
+ :param deserialize: A deserializer
+ :param response: Server response to be deserialized.
+ """
+
+ def __init__(self, response, deserialize, *args):
+
+ model_name = 'StorageError'
+ self.error = deserialize(model_name, response)
+ if self.error is None:
+ self.error = deserialize.dependencies[model_name]()
+ super(StorageErrorException, self).__init__(response=response)
+
+
+class StorageServiceProperties(Model):
+ """Storage service properties.
+
+ :param hour_metrics: A summary of request statistics grouped by API in
+ hourly aggregates for files.
+ :type hour_metrics: ~azure.storage.fileshare.models.Metrics
+ :param minute_metrics: A summary of request statistics grouped by API in
+ minute aggregates for files.
+ :type minute_metrics: ~azure.storage.fileshare.models.Metrics
+ :param cors: The set of CORS rules.
+ :type cors: list[~azure.storage.fileshare.models.CorsRule]
+ :param protocol: Protocol settings
+ :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings
+ """
+
+ _attribute_map = {
+ 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
+ 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
+ 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
+ 'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings', 'xml': {'name': 'ProtocolSettings'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(StorageServiceProperties, self).__init__(**kwargs)
+ self.hour_metrics = kwargs.get('hour_metrics', None)
+ self.minute_metrics = kwargs.get('minute_metrics', None)
+ self.cors = kwargs.get('cors', None)
+ self.protocol = kwargs.get('protocol', None)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models_py3.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models_py3.py
new file mode 100644
index 0000000..7fe9c4a
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models_py3.py
@@ -0,0 +1,1021 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from azure.core.exceptions import HttpResponseError + + +class AccessPolicy(Model): + """An Access policy. + + :param start: The date-time the policy is active. + :type start: str + :param expiry: The date-time the policy expires. + :type expiry: str + :param permission: The permissions for the ACL policy. + :type permission: str + """ + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, + 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, + 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, + } + _xml_map = { + } + + def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None: + super(AccessPolicy, self).__init__(**kwargs) + self.start = start + self.expiry = expiry + self.permission = permission + + +class ClearRange(Model): + """ClearRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'ClearRange' + } + + def __init__(self, *, start: int, end: int, **kwargs) -> None: + super(ClearRange, self).__init__(**kwargs) + self.start = start + self.end = end + + +class CopyFileSmbInfo(Model): + """Additional parameters for start_copy operation. + + :param file_permission_copy_mode: Specifies the option to copy file + security descriptor from source file or to set it using the value which is + defined by the header value of x-ms-file-permission or + x-ms-file-permission-key. Possible values include: 'source', 'override' + :type file_permission_copy_mode: str or + ~azure.storage.fileshare.models.PermissionCopyModeType + :param ignore_read_only: Specifies the option to overwrite the target file + if it already exists and has read-only attribute set. + :type ignore_read_only: bool + :param file_attributes: Specifies either the option to copy file + attributes from a source file(source) to a target file or a list of + attributes to set on a target file. + :type file_attributes: str + :param file_creation_time: Specifies either the option to copy file + creation time from a source file(source) to a target file or a time value + in ISO 8601 format to set as creation time on a target file. + :type file_creation_time: str + :param file_last_write_time: Specifies either the option to copy file last + write time from a source file(source) to a target file or a time value in + ISO 8601 format to set as last write time on a target file. + :type file_last_write_time: str + :param set_archive_attribute: Specifies the option to set archive + attribute on a target file. True means archive attribute will be set on a + target file despite attribute overrides or a source file state. 
+ :type set_archive_attribute: bool
+ """
+
+ _attribute_map = {
+ 'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}},
+ 'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}},
+ 'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}},
+ 'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}},
+ 'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}},
+ 'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, file_permission_copy_mode=None, ignore_read_only: bool=None, file_attributes: str=None, file_creation_time: str=None, file_last_write_time: str=None, set_archive_attribute: bool=None, **kwargs) -> None:
+ super(CopyFileSmbInfo, self).__init__(**kwargs)
+ self.file_permission_copy_mode = file_permission_copy_mode
+ self.ignore_read_only = ignore_read_only
+ self.file_attributes = file_attributes
+ self.file_creation_time = file_creation_time
+ self.file_last_write_time = file_last_write_time
+ self.set_archive_attribute = set_archive_attribute
+
+
+class CorsRule(Model):
+ """CORS is an HTTP feature that enables a web application running under one
+ domain to access resources in another domain. Web browsers implement a
+ security restriction known as same-origin policy that prevents a web page
+ from calling APIs in a different domain; CORS provides a secure way to
+ allow one domain (the origin domain) to call APIs in another domain.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param allowed_origins: Required. The origin domains that are permitted to
+ make a request against the storage service via CORS. The origin domain is
+ the domain from which the request originates. Note that the origin must be
+ an exact case-sensitive match with the origin that the user agent sends to
+ the service. You can also use the wildcard character '*' to allow all
+ origin domains to make requests via CORS.
+ :type allowed_origins: str
+ :param allowed_methods: Required. The methods (HTTP request verbs) that
+ the origin domain may use for a CORS request. (comma separated)
+ :type allowed_methods: str
+ :param allowed_headers: Required. The request headers that the origin
+ domain may specify on the CORS request.
+ :type allowed_headers: str
+ :param exposed_headers: Required. The response headers that may be sent in
+ the response to the CORS request and exposed by the browser to the request
+ issuer.
+ :type exposed_headers: str
+ :param max_age_in_seconds: Required. The maximum amount of time that a
+ browser should cache the preflight OPTIONS request.
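A minimal sketch of the typed constructor below, with illustrative values; note that all five parameters are required:

from azure.multiapi.storagev2.fileshare.v2020_02_10._generated.models import CorsRule

rule = CorsRule(
    allowed_origins='https://contoso.example',  # must match the Origin header exactly (case-sensitive)
    allowed_methods='GET,PUT',                  # comma-separated HTTP verbs
    allowed_headers='x-ms-meta-*',
    exposed_headers='x-ms-request-id',
    max_age_in_seconds=3600,                    # how long browsers may cache the preflight response
)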
+ :type max_age_in_seconds: int + """ + + _validation = { + 'allowed_origins': {'required': True}, + 'allowed_methods': {'required': True}, + 'allowed_headers': {'required': True}, + 'exposed_headers': {'required': True}, + 'max_age_in_seconds': {'required': True, 'minimum': 0}, + } + + _attribute_map = { + 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, + 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, + 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, + 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, + 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, + } + _xml_map = { + } + + def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None: + super(CorsRule, self).__init__(**kwargs) + self.allowed_origins = allowed_origins + self.allowed_methods = allowed_methods + self.allowed_headers = allowed_headers + self.exposed_headers = exposed_headers + self.max_age_in_seconds = max_age_in_seconds + + +class DirectoryItem(Model): + """A listed directory item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + } + _xml_map = { + 'name': 'Directory' + } + + def __init__(self, *, name: str, **kwargs) -> None: + super(DirectoryItem, self).__init__(**kwargs) + self.name = name + + +class FileHTTPHeaders(Model): + """Additional parameters for a set of operations, such as: File_create, + File_set_http_headers. + + :param file_content_type: Sets the MIME content type of the file. The + default type is 'application/octet-stream'. + :type file_content_type: str + :param file_content_encoding: Specifies which content encodings have been + applied to the file. + :type file_content_encoding: str + :param file_content_language: Specifies the natural languages used by this + resource. + :type file_content_language: str + :param file_cache_control: Sets the file's cache control. The File service + stores this value but does not use or modify it. + :type file_cache_control: str + :param file_content_md5: Sets the file's MD5 hash. + :type file_content_md5: bytearray + :param file_content_disposition: Sets the file's Content-Disposition + header. 
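A minimal sketch of the headers model, assuming illustrative values:

from azure.multiapi.storagev2.fileshare.v2020_02_10._generated.models import FileHTTPHeaders

headers = FileHTTPHeaders(
    file_content_type='application/json',
    file_cache_control='no-cache',  # stored verbatim; the File service never interprets it
    file_content_disposition='attachment; filename="report.json"',
)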
+ :type file_content_disposition: str + """ + + _attribute_map = { + 'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}}, + 'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}}, + 'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}}, + 'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}}, + 'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}}, + 'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}}, + } + _xml_map = { + } + + def __init__(self, *, file_content_type: str=None, file_content_encoding: str=None, file_content_language: str=None, file_cache_control: str=None, file_content_md5: bytearray=None, file_content_disposition: str=None, **kwargs) -> None: + super(FileHTTPHeaders, self).__init__(**kwargs) + self.file_content_type = file_content_type + self.file_content_encoding = file_content_encoding + self.file_content_language = file_content_language + self.file_cache_control = file_cache_control + self.file_content_md5 = file_content_md5 + self.file_content_disposition = file_content_disposition + + +class FileItem(Model): + """A listed file item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param properties: Required. + :type properties: ~azure.storage.fileshare.models.FileProperty + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}}, + } + _xml_map = { + 'name': 'File' + } + + def __init__(self, *, name: str, properties, **kwargs) -> None: + super(FileItem, self).__init__(**kwargs) + self.name = name + self.properties = properties + + +class FileProperty(Model): + """File properties. + + All required parameters must be populated in order to send to Azure. + + :param content_length: Required. Content length of the file. This value + may not be up-to-date since an SMB client may have modified the file + locally. The value of Content-Length may not reflect that fact until the + handle is closed or the op-lock is broken. To retrieve current property + values, call Get File Properties. + :type content_length: long + """ + + _validation = { + 'content_length': {'required': True}, + } + + _attribute_map = { + 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, + } + _xml_map = { + } + + def __init__(self, *, content_length: int, **kwargs) -> None: + super(FileProperty, self).__init__(**kwargs) + self.content_length = content_length + + +class FileRange(Model): + """An Azure Storage file range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. Start of the range. + :type start: long + :param end: Required. End of the range. 
+ :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'Range' + } + + def __init__(self, *, start: int, end: int, **kwargs) -> None: + super(FileRange, self).__init__(**kwargs) + self.start = start + self.end = end + + +class FilesAndDirectoriesListSegment(Model): + """Abstract for entries that can be listed from Directory. + + All required parameters must be populated in order to send to Azure. + + :param directory_items: Required. + :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] + :param file_items: Required. + :type file_items: list[~azure.storage.fileshare.models.FileItem] + """ + + _validation = { + 'directory_items': {'required': True}, + 'file_items': {'required': True}, + } + + _attribute_map = { + 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}}, + 'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}}, + } + _xml_map = { + 'name': 'Entries' + } + + def __init__(self, *, directory_items, file_items, **kwargs) -> None: + super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) + self.directory_items = directory_items + self.file_items = file_items + + +class HandleItem(Model): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :param handle_id: Required. XSMB service handle ID + :type handle_id: str + :param path: Required. File or directory name including full path starting + from share root + :type path: str + :param file_id: Required. FileId uniquely identifies the file or + directory. + :type file_id: str + :param parent_id: ParentId uniquely identifies the parent directory of the + object. + :type parent_id: str + :param session_id: Required. SMB session ID in context of which the file + handle was opened + :type session_id: str + :param client_ip: Required. Client IP that opened the handle + :type client_ip: str + :param open_time: Required. Time when the session that previously opened + the handle has last been reconnected. 
(UTC) + :type open_time: datetime + :param last_reconnect_time: Time handle was last connected to (UTC) + :type last_reconnect_time: datetime + """ + + _validation = { + 'handle_id': {'required': True}, + 'path': {'required': True}, + 'file_id': {'required': True}, + 'session_id': {'required': True}, + 'client_ip': {'required': True}, + 'open_time': {'required': True}, + } + + _attribute_map = { + 'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}}, + 'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}}, + 'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}}, + 'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}}, + 'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}}, + 'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}}, + 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}}, + 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}}, + } + _xml_map = { + 'name': 'Handle' + } + + def __init__(self, *, handle_id: str, path: str, file_id: str, session_id: str, client_ip: str, open_time, parent_id: str=None, last_reconnect_time=None, **kwargs) -> None: + super(HandleItem, self).__init__(**kwargs) + self.handle_id = handle_id + self.path = path + self.file_id = file_id + self.parent_id = parent_id + self.session_id = session_id + self.client_ip = client_ip + self.open_time = open_time + self.last_reconnect_time = last_reconnect_time + + +class LeaseAccessConditions(Model): + """Additional parameters for a set of operations. + + :param lease_id: If specified, the operation only succeeds if the + resource's lease is active and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, + } + _xml_map = { + } + + def __init__(self, *, lease_id: str=None, **kwargs) -> None: + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = lease_id + + +class ListFilesAndDirectoriesSegmentResponse(Model): + """An enumeration of directories and files. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param share_name: Required. + :type share_name: str + :param share_snapshot: + :type share_snapshot: str + :param directory_path: Required. + :type directory_path: str + :param prefix: Required. + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. + :type segment: + ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment + :param next_marker: Required. 
+ :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'share_name': {'required': True}, + 'directory_path': {'required': True}, + 'prefix': {'required': True}, + 'segment': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}}, + 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}}, + 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, *, service_endpoint: str, share_name: str, directory_path: str, prefix: str, segment, next_marker: str, share_snapshot: str=None, marker: str=None, max_results: int=None, **kwargs) -> None: + super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.share_name = share_name + self.share_snapshot = share_snapshot + self.directory_path = directory_path + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.segment = segment + self.next_marker = next_marker + + +class ListHandlesResponse(Model): + """An enumeration of handles. + + All required parameters must be populated in order to send to Azure. + + :param handle_list: + :type handle_list: list[~azure.storage.fileshare.models.HandleItem] + :param next_marker: Required. + :type next_marker: str + """ + + _validation = { + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, *, next_marker: str, handle_list=None, **kwargs) -> None: + super(ListHandlesResponse, self).__init__(**kwargs) + self.handle_list = handle_list + self.next_marker = next_marker + + +class ListSharesResponse(Model): + """An enumeration of shares. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param share_items: + :type share_items: list[~azure.storage.fileshare.models.ShareItem] + :param next_marker: Required. 
+ :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, *, service_endpoint: str, next_marker: str, prefix: str=None, marker: str=None, max_results: int=None, share_items=None, **kwargs) -> None: + super(ListSharesResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.share_items = share_items + self.next_marker = next_marker + + +class Metrics(Model): + """Storage Analytics metrics for file service. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the + File service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary + statistics for called API operations. + :type include_apis: bool + :param retention_policy: + :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, *, version: str, enabled: bool, include_apis: bool=None, retention_policy=None, **kwargs) -> None: + super(Metrics, self).__init__(**kwargs) + self.version = version + self.enabled = enabled + self.include_apis = include_apis + self.retention_policy = retention_policy + + +class RetentionPolicy(Model): + """The retention policy. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether a retention policy is enabled + for the File service. If false, metrics data is retained, and the user is + responsible for deleting it. + :type enabled: bool + :param days: Indicates the number of days that metrics data should be + retained. All data older than this value will be deleted. Metrics data is + deleted on a best-effort basis after the retention period expires. 
+ :type days: int + """ + + _validation = { + 'enabled': {'required': True}, + 'days': {'maximum': 365, 'minimum': 1}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, + } + _xml_map = { + } + + def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None: + super(RetentionPolicy, self).__init__(**kwargs) + self.enabled = enabled + self.days = days + + +class ShareFileRangeList(Model): + """The list of file ranges. + + :param ranges: + :type ranges: list[~azure.storage.fileshare.models.FileRange] + :param clear_ranges: + :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] + """ + + _attribute_map = { + 'ranges': {'key': 'Ranges', 'type': '[FileRange]', 'xml': {'name': 'Ranges', 'itemsName': 'Range'}}, + 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]', 'xml': {'name': 'ClearRanges', 'itemsName': 'ClearRange'}}, + } + _xml_map = { + } + + def __init__(self, *, ranges=None, clear_ranges=None, **kwargs) -> None: + super(ShareFileRangeList, self).__init__(**kwargs) + self.ranges = ranges + self.clear_ranges = clear_ranges + + +class ShareItem(Model): + """A listed Azure Storage share item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param snapshot: + :type snapshot: str + :param deleted: + :type deleted: bool + :param version: + :type version: str + :param properties: Required. + :type properties: ~azure.storage.fileshare.models.ShareProperties + :param metadata: + :type metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, + 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}}, + 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, + } + _xml_map = { + 'name': 'Share' + } + + def __init__(self, *, name: str, properties, snapshot: str=None, deleted: bool=None, version: str=None, metadata=None, **kwargs) -> None: + super(ShareItem, self).__init__(**kwargs) + self.name = name + self.snapshot = snapshot + self.deleted = deleted + self.version = version + self.properties = properties + self.metadata = metadata + + +class SharePermission(Model): + """A permission (a security descriptor) at the share level. + + All required parameters must be populated in order to send to Azure. + + :param permission: Required. The permission in the Security Descriptor + Definition Language (SDDL). + :type permission: str + """ + + _validation = { + 'permission': {'required': True}, + } + + _attribute_map = { + 'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}}, + } + _xml_map = { + } + + def __init__(self, *, permission: str, **kwargs) -> None: + super(SharePermission, self).__init__(**kwargs) + self.permission = permission + + +class ShareProperties(Model): + """Properties of a share. + + All required parameters must be populated in order to send to Azure. + + :param last_modified: Required. + :type last_modified: datetime + :param etag: Required. + :type etag: str + :param quota: Required. 
+ :type quota: int + :param provisioned_iops: + :type provisioned_iops: int + :param provisioned_ingress_mbps: + :type provisioned_ingress_mbps: int + :param provisioned_egress_mbps: + :type provisioned_egress_mbps: int + :param next_allowed_quota_downgrade_time: + :type next_allowed_quota_downgrade_time: datetime + :param deleted_time: + :type deleted_time: datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param lease_status: Possible values include: 'locked', 'unlocked' + :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType + :param lease_state: Possible values include: 'available', 'leased', + 'expired', 'breaking', 'broken' + :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType + :param lease_duration: Possible values include: 'infinite', 'fixed' + :type lease_duration: str or + ~azure.storage.fileshare.models.LeaseDurationType + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + 'quota': {'required': True}, + } + + _attribute_map = { + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, + 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, + 'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}}, + 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}}, + 'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}}, + 'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}}, + 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, + 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, + } + _xml_map = { + } + + def __init__(self, *, last_modified, etag: str, quota: int, provisioned_iops: int=None, provisioned_ingress_mbps: int=None, provisioned_egress_mbps: int=None, next_allowed_quota_downgrade_time=None, deleted_time=None, remaining_retention_days: int=None, lease_status=None, lease_state=None, lease_duration=None, **kwargs) -> None: + super(ShareProperties, self).__init__(**kwargs) + self.last_modified = last_modified + self.etag = etag + self.quota = quota + self.provisioned_iops = provisioned_iops + self.provisioned_ingress_mbps = provisioned_ingress_mbps + self.provisioned_egress_mbps = provisioned_egress_mbps + self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days + self.lease_status = lease_status + self.lease_state = lease_state + self.lease_duration = lease_duration + + +class ShareProtocolSettings(Model): + """Protocol settings. + + :param smb: Settings for SMB protocol. 
+ :type smb: ~azure.storage.fileshare.models.ShareSmbSettings + """ + + _attribute_map = { + 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings', 'xml': {'name': 'SMB'}}, + } + _xml_map = { + } + + def __init__(self, *, smb=None, **kwargs) -> None: + super(ShareProtocolSettings, self).__init__(**kwargs) + self.smb = smb + + +class ShareSmbSettings(Model): + """Settings for SMB protocol. + + :param multichannel: Settings for SMB Multichannel. + :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel + """ + + _attribute_map = { + 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel', 'xml': {'name': 'Multichannel'}}, + } + _xml_map = { + } + + def __init__(self, *, multichannel=None, **kwargs) -> None: + super(ShareSmbSettings, self).__init__(**kwargs) + self.multichannel = multichannel + + +class ShareStats(Model): + """Stats for the share. + + All required parameters must be populated in order to send to Azure. + + :param share_usage_bytes: Required. The approximate size of the data + stored in bytes. Note that this value may not include all recently created + or recently resized files. + :type share_usage_bytes: int + """ + + _validation = { + 'share_usage_bytes': {'required': True}, + } + + _attribute_map = { + 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}}, + } + _xml_map = { + } + + def __init__(self, *, share_usage_bytes: int, **kwargs) -> None: + super(ShareStats, self).__init__(**kwargs) + self.share_usage_bytes = share_usage_bytes + + +class SignedIdentifier(Model): + """Signed identifier. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A unique id. + :type id: str + :param access_policy: The access policy. + :type access_policy: ~azure.storage.fileshare.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, + } + _xml_map = { + } + + def __init__(self, *, id: str, access_policy=None, **kwargs) -> None: + super(SignedIdentifier, self).__init__(**kwargs) + self.id = id + self.access_policy = access_policy + + +class SmbMultichannel(Model): + """Settings for SMB multichannel. + + :param enabled: If SMB multichannel is enabled. + :type enabled: bool + """ + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + } + _xml_map = { + 'name': 'Multichannel' + } + + def __init__(self, *, enabled: bool=None, **kwargs) -> None: + super(SmbMultichannel, self).__init__(**kwargs) + self.enabled = enabled + + +class SourceModifiedAccessConditions(Model): + """Additional parameters for upload_range_from_url operation. + + :param source_if_match_crc64: Specify the crc64 value to operate only on + range with a matching crc64 checksum. + :type source_if_match_crc64: bytearray + :param source_if_none_match_crc64: Specify the crc64 value to operate only + on range without a matching crc64 checksum. 
+ :type source_if_none_match_crc64: bytearray
+ """
+
+ _attribute_map = {
+ 'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}},
+ 'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, source_if_match_crc64: bytearray=None, source_if_none_match_crc64: bytearray=None, **kwargs) -> None:
+ super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+ self.source_if_match_crc64 = source_if_match_crc64
+ self.source_if_none_match_crc64 = source_if_none_match_crc64
+
+
+class StorageError(Model):
+ """StorageError.
+
+ :param message:
+ :type message: str
+ """
+
+ _attribute_map = {
+ 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, message: str=None, **kwargs) -> None:
+ super(StorageError, self).__init__(**kwargs)
+ self.message = message
+
+
+class StorageErrorException(HttpResponseError):
+ """Server responded with an exception of type: 'StorageError'.
+
+ :param deserialize: A deserializer
+ :param response: Server response to be deserialized.
+ """
+
+ def __init__(self, response, deserialize, *args):
+
+ model_name = 'StorageError'
+ self.error = deserialize(model_name, response)
+ if self.error is None:
+ self.error = deserialize.dependencies[model_name]()
+ super(StorageErrorException, self).__init__(response=response)
+
+
+class StorageServiceProperties(Model):
+ """Storage service properties.
+
+ :param hour_metrics: A summary of request statistics grouped by API in
+ hourly aggregates for files.
+ :type hour_metrics: ~azure.storage.fileshare.models.Metrics
+ :param minute_metrics: A summary of request statistics grouped by API in
+ minute aggregates for files.
+ :type minute_metrics: ~azure.storage.fileshare.models.Metrics
+ :param cors: The set of CORS rules.
+ :type cors: list[~azure.storage.fileshare.models.CorsRule]
+ :param protocol: Protocol settings
+ :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings
+ """
+
+ _attribute_map = {
+ 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
+ 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
+ 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
+ 'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings', 'xml': {'name': 'ProtocolSettings'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, hour_metrics=None, minute_metrics=None, cors=None, protocol=None, **kwargs) -> None:
+ super(StorageServiceProperties, self).__init__(**kwargs)
+ self.hour_metrics = hour_metrics
+ self.minute_metrics = minute_metrics
+ self.cors = cors
+ self.protocol = protocol
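+
+
+# Added commentary (not AutoRest output): each model's ``_attribute_map``
+# binds a Python attribute to its serialized key, and ``_xml_map`` names the
+# XML element it is read from and written to. A minimal, illustrative
+# construction sketch using only the models defined above:
+#
+#     props = StorageServiceProperties(
+#         hour_metrics=Metrics(
+#             version='1.0',
+#             enabled=True,
+#             retention_policy=RetentionPolicy(enabled=True, days=7)))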
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/__init__.py
new file mode 100644
index 0000000..65680c9
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/__init__.py
@@ -0,0 +1,22 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations import ServiceOperations
+from ._share_operations import ShareOperations
+from ._directory_operations import DirectoryOperations
+from ._file_operations import FileOperations
+
+__all__ = [
+ 'ServiceOperations',
+ 'ShareOperations',
+ 'DirectoryOperations',
+ 'FileOperations',
+]
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_directory_operations.py
new file mode 100644
index 0000000..c38bc8d
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_directory_operations.py
@@ -0,0 +1,672 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class DirectoryOperations(object):
+ """DirectoryOperations operations.
+
+ You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar restype: Constant value: "directory".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.restype = "directory"
+
+ def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs):
+ """Creates a new directory under the specified share or parent directory.
+
+ :param file_attributes: If specified, the provided file attributes
+ shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
+ directory. ‘None’ can also be specified as default.
+ :type file_attributes: str
+ :param file_creation_time: Creation time for the file/directory.
+ Default value: Now.
+ :type file_creation_time: str
+ :param file_last_write_time: Last write time for the file/directory.
+ Default value: Now.
+ :type file_last_write_time: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for File Service Operations.
+ :type timeout: int
+ :param metadata: A name-value pair to associate with a file storage
+ object.
+ :type metadata: str
+ :param file_permission: If specified, the permission (security
+ descriptor) shall be set for the directory/file. This header can be
+ used if Permission size is <= 8KB, else x-ms-file-permission-key
+ header shall be used. Default value: Inherit. If SDDL is specified as
+ input, it must have owner, group and dacl. Note: Only one of the
+ x-ms-file-permission or x-ms-file-permission-key should be specified.
+ :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': 
self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{shareName}/{directory}'} + + def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs): + """Returns all system properties for the specified directory, and can also + be used to check the existence of a directory. The data returned does + not include the files in the directory or any subdirectories. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', 
response.headers.get('x-ms-file-id')),
+ 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_properties.metadata = {'url': '/{shareName}/{directory}'}
+
+ def delete(self, timeout=None, cls=None, **kwargs):
+ """Removes the specified empty directory. Note that the directory must be
+ empty before it can be deleted.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for File Service Operations.
+ :type timeout: int
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ # Construct URL
+ url = self.delete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ delete.metadata = {'url': '/{shareName}/{directory}'}
+
+ def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs):
+ """Sets properties on the directory.
+
+ :param file_attributes: If specified, the provided file attributes
+ shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
+ directory. ‘None’ can also be specified as default.
+ :type file_attributes: str
+ :param file_creation_time: Creation time for the file/directory.
+ Default value: Now.
+ :type file_creation_time: str
+ :param file_last_write_time: Last write time for the file/directory.
+ Default value: Now.
+ :type file_last_write_time: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for File Service Operations.
+ :type timeout: int
+ :param file_permission: If specified, the permission (security
+ descriptor) shall be set for the directory/file.
This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.set_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': 
self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_properties.metadata = {'url': '/{shareName}/{directory}'} + + def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs): + """Updates user defined metadata for the specified directory. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}/{directory}'} + + def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, cls=None, **kwargs): + """Returns a list of files or directories under the specified share or + directory. It lists the contents only for a single level of the + directory hierarchy. 
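+
+ Example (an illustrative sketch only, not generated documentation;
+ assumes ``directory`` is a DirectoryOperations instance created by
+ the generated client)::
+
+ response = directory.list_files_and_directories_segment(
+ prefix='logs', maxresults=100)
+ for item in response.segment.directory_items:
+ print(item.name)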
+ + :param prefix: Filters the results to return only entries whose name + begins with the specified prefix. + :type prefix: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListFilesAndDirectoriesSegmentResponse or the result of + cls(response) + :rtype: + ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "list" + + # Construct URL + url = self.list_files_and_directories_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response) + header_dict = { + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'}
+
+ def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, cls=None, **kwargs):
+ """Lists handles for the directory.
+
+ :param marker: A string value that identifies the portion of the list
+ to be returned with the next list operation. The operation returns a
+ marker value within the response body if the list returned was not
+ complete. The marker value may then be used in a subsequent call to
+ request the next set of list items. The marker value is opaque to the
+ client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of entries to return.
+ If the request does not specify maxresults, or specifies a value
+ greater than 5,000, the server will return up to 5,000 items.
+ :type maxresults: int
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for File Service Operations.
+ :type timeout: int
+ :param sharesnapshot: The snapshot parameter is an opaque DateTime
+ value that, when present, specifies the share snapshot to query.
+ :type sharesnapshot: str
+ :param recursive: Specifies whether the operation should apply to the
+ directory specified in the URI, its files, its subdirectories and
+ their files.
+ :type recursive: bool
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: ListHandlesResponse or the result of cls(response)
+ :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "listhandles"
+
+ # Construct URL
+ url = self.list_handles.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if sharesnapshot is not None:
+ query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ if recursive is not None:
+ header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('ListHandlesResponse', response)
+ header_dict = {
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_handles.metadata = {'url': '/{shareName}/{directory}'}
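+
+ # Added commentary (not AutoRest output): a minimal pagination sketch for
+ # list_handles above, assuming ``ops`` is an already-constructed
+ # DirectoryOperations instance. Keep passing the returned ``next_marker``
+ # back in as ``marker`` until it comes back empty:
+ #
+ #     marker = None
+ #     while True:
+ #         page = ops.list_handles(marker=marker, maxresults=100)
+ #         for handle in page.handle_list or []:
+ #             print(handle.handle_id, handle.client_ip)
+ #         if not page.next_marker:
+ #             break
+ #         marker = page.next_marker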
+
+ def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, cls=None, **kwargs):
+ """Closes all handles open for the given directory.
+
+ :param handle_id: Specifies handle ID opened on the file or directory
+ to be closed. Asterisk (‘*’) is a wildcard that specifies all handles.
+ :type handle_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for File Service Operations.
+ :type timeout: int
+ :param marker: A string value that identifies the portion of the list
+ to be returned with the next list operation. The operation returns a
+ marker value within the response body if the list returned was not
+ complete. The marker value may then be used in a subsequent call to
+ request the next set of list items. The marker value is opaque to the
+ client.
+ :type marker: str
+ :param sharesnapshot: The snapshot parameter is an opaque DateTime
+ value that, when present, specifies the share snapshot to query.
+ :type sharesnapshot: str
+ :param recursive: Specifies whether the operation should apply to the
+ directory specified in the URI, its files, its subdirectories and
+ their files.
+ :type recursive: bool + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "forceclosehandles" + + # Construct URL + url = self.force_close_handles.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') + if recursive is not None: + header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), + 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), + 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + force_close_handles.metadata = {'url': '/{shareName}/{directory}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_file_operations.py new file mode 100644 index 0000000..48df932 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_file_operations.py @@ -0,0 +1,1670 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. 
import models
+
+
+class FileOperations(object):
+ """FileOperations operations.
+
+ You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file".
+ :ivar x_ms_copy_action: Constant value: "abort".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.x_ms_type = "file"
+ self.x_ms_copy_action = "abort"
+
+ def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, cls=None, **kwargs):
+ """Creates a new file or replaces a file. Note that it only initializes
+ the file with no content.
+
+ :param file_content_length: Specifies the maximum size for the file,
+ up to 4 TB.
+ :type file_content_length: long
+ :param file_attributes: If specified, the provided file attributes
+ shall be set. Default value: ‘Archive’ for file and ‘Directory’ for
+ directory. ‘None’ can also be specified as default.
+ :type file_attributes: str
+ :param file_creation_time: Creation time for the file/directory.
+ Default value: Now.
+ :type file_creation_time: str
+ :param file_last_write_time: Last write time for the file/directory.
+ Default value: Now.
+ :type file_last_write_time: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for File Service Operations.
+ :type timeout: int
+ :param metadata: A name-value pair to associate with a file storage
+ object.
+ :type metadata: str
+ :param file_permission: If specified, the permission (security
+ descriptor) shall be set for the directory/file. This header can be
+ used if Permission size is <= 8KB, else x-ms-file-permission-key
+ header shall be used. Default value: Inherit. If SDDL is specified as
+ input, it must have owner, group and dacl. Note: Only one of the
+ x-ms-file-permission or x-ms-file-permission-key should be specified.
+ :type file_permission: str
+ :param file_permission_key: Key of the permission to be set for the
+ directory/file. Note: Only one of the x-ms-file-permission or
+ x-ms-file-permission-key should be specified.
+ :type file_permission_key: str + :param file_http_headers: Additional parameters for the operation + :type file_http_headers: + ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_content_type = None + if file_http_headers is not None: + file_content_type = file_http_headers.file_content_type + file_content_encoding = None + if file_http_headers is not None: + file_content_encoding = file_http_headers.file_content_encoding + file_content_language = None + if file_http_headers is not None: + file_content_language = file_http_headers.file_content_language + file_cache_control = None + if file_http_headers is not None: + file_cache_control = file_http_headers.file_cache_control + file_content_md5 = None + if file_http_headers is not None: + file_content_md5 = file_http_headers.file_content_md5 + file_content_disposition = None + if file_http_headers is not None: + file_content_disposition = file_http_headers.file_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') + if file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') + if file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') + if file_cache_control is not 
None: + header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') + if file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') + if file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def download(self, timeout=None, range=None, range_get_content_md5=None, lease_access_conditions=None, cls=None, **kwargs): + """Reads or downloads a file from the system, including its metadata and + properties. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param range: Return file data only from the specified byte range. + :type range: str + :param range_get_content_md5: When this header is set to true and + specified together with the Range header, the service returns the MD5 + hash for the range, as long as the range is less than or equal to 4 MB + in size. 
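+                # the x-ms-copy-* entries that follow surface the state of the
+                # most recent Copy File operation on this file, if any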
+ :type range_get_content_md5: bool + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.download.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 
'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', 
response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs): + """Returns all user-defined metadata, standard HTTP properties, and system + properties for the file. It does not return the content of the file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
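+
+        As a sketch: since the call returns None unless a ``cls`` callback is
+        supplied, the parsed headers can be captured like this (``ops`` is an
+        assumed, already constructed FileOperations instance)::
+
+            props = ops.get_properties(cls=lambda resp, body, headers: headers)
+            size = props['Content-Length']
+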
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, 
response.headers.get('x-ms-copy-status')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def delete(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs): + """removes the file from the storage account. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, cls=None, **kwargs): + """Sets HTTP headers on the file. + + :param file_attributes: If specified, the provided file attributes + shall be set. Default value: ‘Archive’ for file and ‘Directory’ for + directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. + Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. + Default value: Now. + :type file_last_write_time: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param file_content_length: Resizes a file to the specified size. If + the specified byte value is less than the current size of the file, + then all ranges above the specified byte value are cleared. + :type file_content_length: long + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
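+
+        A usage sketch (``ops`` as an assumed FileOperations instance; the
+        FileHTTPHeaders keyword arguments are assumed to mirror the attribute
+        names this method reads from the model)::
+
+            # resize to 2 KiB; ranges beyond the new length are cleared
+            ops.set_http_headers(file_content_length=2048)
+            # set a content type via the models helper
+            ops.set_http_headers(
+                file_http_headers=models.FileHTTPHeaders(file_content_type='text/plain'))
+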
+ :type file_permission_key: str + :param file_http_headers: Additional parameters for the operation + :type file_http_headers: + ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_content_type = None + if file_http_headers is not None: + file_content_type = file_http_headers.file_content_type + file_content_encoding = None + if file_http_headers is not None: + file_content_encoding = file_http_headers.file_content_encoding + file_content_language = None + if file_http_headers is not None: + file_content_language = file_http_headers.file_content_language + file_cache_control = None + if file_http_headers is not None: + file_cache_control = file_http_headers.file_cache_control + file_content_md5 = None + if file_http_headers is not None: + file_content_md5 = file_http_headers.file_content_md5 + file_content_disposition = None + if file_http_headers is not None: + file_content_disposition = file_http_headers.file_content_disposition + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "properties" + + # Construct URL + url = self.set_http_headers.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_content_length is not None: + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') + if file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') + if file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') + if file_cache_control is not None: + header_parameters['x-ms-cache-control'] = 
self._serialize.header("file_cache_control", file_cache_control, 'str') + if file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') + if file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), + 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), + 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), + 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), + 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), + 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, cls=None, **kwargs): + """Updates user-defined metadata for the specified file. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. 
+ :type metadata: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or + negative one (-1) for a lease that never expires. A non-infinite lease + can be between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
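+
+        Sketch (``ops`` as an assumed instance; the ``cls`` callback pattern
+        matches this module's contract of (response, body, headers))::
+
+            import uuid
+            hdrs = ops.acquire_lease(duration=-1,  # lease that never expires
+                                     proposed_lease_id=str(uuid.uuid4()),
+                                     cls=lambda resp, body, headers: headers)
+            lease_id = hdrs['x-ms-lease-id']
+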
+ :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "acquire" + + # Construct URL + url = self.acquire_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def release_lease(self, lease_id, timeout=None, request_id=None, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
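+
+        Sketch (``ops`` and ``lease_id`` as in the acquire_lease example)::
+
+            ops.release_lease(lease_id=lease_id)
+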
+ :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "release" + + # Construct URL + url = self.release_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, request_id=None, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
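+
+        Sketch (continuing the acquire_lease example)::
+
+            import uuid
+            ops.change_lease(lease_id=lease_id,
+                             proposed_lease_id=str(uuid.uuid4()))
+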
+ :type proposed_lease_id: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "change" + + # Construct URL + url = self.change_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def break_lease(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): + """[Update] The Lease File operation establishes and manages a lock on a + file for write and delete operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
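+
+        Sketch (``ops`` as an assumed instance; no current lease ID is
+        required to break a lease, and success is a 202)::
+
+            ops.break_lease()
+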
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "lease" + action = "break" + + # Construct URL + url = self.break_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, lease_access_conditions=None, cls=None, **kwargs): + """Upload a range of bytes to a file. + + :param range: Specifies the range of bytes to be written. Both the + start and end of the range must be specified. For an update operation, + the range can be up to 4 MB in size. For a clear operation, the range + can be up to the value of the file's full size. The File service + accepts only a single byte range for the Range and 'x-ms-range' + headers, and the byte range must be specified in the following format: + bytes=startByte-endByte. 
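+
+        Sketch (``ops`` as an assumed instance; the body is shown as a
+        one-chunk generator because this operation streams its content)::
+
+            data = b'hello, share'
+            ops.upload_range(range='bytes=0-%d' % (len(data) - 1),
+                             content_length=len(data),
+                             optionalbody=iter([data]))
+            # clearing a range sends no body, so content_length must be zero
+            ops.upload_range(range='bytes=0-511', content_length=0,
+                             file_range_write='clear')
+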
+ :type range: str + :param file_range_write: Specify one of the following options: - + Update: Writes the bytes specified by the request body into the + specified range. The Range and Content-Length headers must match to + perform the update. - Clear: Clears the specified range and releases + the space used in storage for that range. To clear a range, set the + Content-Length header to zero, and set the Range header to a value + that indicates the range to clear, up to maximum file size. Possible + values include: 'update', 'clear' + :type file_range_write: str or + ~azure.storage.fileshare.models.FileRangeWriteType + :param content_length: Specifies the number of bytes being transmitted + in the request body. When the x-ms-write header is set to clear, the + value of this header must be set to zero. + :type content_length: long + :param optionalbody: Initial data. + :type optionalbody: Generator + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param content_md5: An MD5 hash of the content. This hash is used to + verify the integrity of the data during transport. When the + Content-MD5 header is specified, the File service compares the hash of + the content that has arrived with the header value that was sent. If + the two hashes do not match, the operation will fail with error code + 400 (Bad Request). + :type content_md5: bytearray + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "range" + + # Construct URL + url = self.upload_range.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/octet-stream' + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'FileRangeWriteType') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct body + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def upload_range_from_url(self, range, copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): + """Upload a range of bytes to a file where the contents are read from a + URL. + + :param range: Writes data to the specified byte range in the file. + :type range: str + :param copy_source: Specifies the URL of the source file or blob, up + to 2 KB in length. To copy a file to another file within the same + storage account, you may use Shared Key to authenticate the source + file. If you are copying a file from another storage account, or if + you are copying a blob from the same storage account or another + storage account, then you must authenticate the source file or blob + using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a + share snapshot can also be specified as a copy source. + :type copy_source: str + :param content_length: Specifies the number of bytes being transmitted + in the request body. When the x-ms-write header is set to clear, the + value of this header must be set to zero. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_crc64: Specify the crc64 calculated for the + range of bytes that must be read from the copy source. 
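+
+        Sketch (``ops`` as an assumed instance; the source URL and SAS token
+        are placeholders, and content_length is zero on the assumption that
+        this request carries no body of its own)::
+
+            src = 'https://account.file.core.windows.net/share/src.txt?<SAS>'
+            ops.upload_range_from_url(range='bytes=0-1023', copy_source=src,
+                                      content_length=0,
+                                      source_range='bytes=0-1023')
+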
+ :type source_content_crc64: bytearray + :param source_modified_access_conditions: Additional parameters for + the operation + :type source_modified_access_conditions: + ~azure.storage.fileshare.models.SourceModifiedAccessConditions + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + source_if_match_crc64 = None + if source_modified_access_conditions is not None: + source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 + source_if_none_match_crc64 = None + if source_modified_access_conditions is not None: + source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "range" + + # Construct URL + url = self.upload_range_from_url.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if source_content_crc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if source_if_match_crc64 is not None: + header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray') + if source_if_none_match_crc64 is not None: + header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def get_range_list(self, sharesnapshot=None, prevsharesnapshot=None, timeout=None, range=None, lease_access_conditions=None, cls=None, **kwargs): + """Returns the list of valid ranges for a file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param prevsharesnapshot: The previous snapshot parameter is an opaque + DateTime value that, when present, specifies the previous snapshot. + :type prevsharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param range: Specifies the range of bytes over which to list ranges, + inclusively. + :type range: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: ShareFileRangeList or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareFileRangeList + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "rangelist" + + # Construct URL + url = self.get_range_list.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if prevsharesnapshot is not None: + query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + 
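+        # a 200 response carries an XML body, deserialized below into a
+        # ShareFileRangeList; selected headers are surfaced via header_dict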
response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ShareFileRangeList', response) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def start_copy(self, copy_source, timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, copy_file_smb_info=None, lease_access_conditions=None, cls=None, **kwargs): + """Copies a blob or file to a destination file within the storage account. + + :param copy_source: Specifies the URL of the source file or blob, up + to 2 KB in length. To copy a file to another file within the same + storage account, you may use Shared Key to authenticate the source + file. If you are copying a file from another storage account, or if + you are copying a blob from the same storage account or another + storage account, then you must authenticate the source file or blob + using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a + share snapshot can also be specified as a copy source. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
+ :type file_permission_key: str + :param copy_file_smb_info: Additional parameters for the operation + :type copy_file_smb_info: + ~azure.storage.fileshare.models.CopyFileSmbInfo + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + file_permission_copy_mode = None + if copy_file_smb_info is not None: + file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode + ignore_read_only = None + if copy_file_smb_info is not None: + ignore_read_only = copy_file_smb_info.ignore_read_only + file_attributes = None + if copy_file_smb_info is not None: + file_attributes = copy_file_smb_info.file_attributes + file_creation_time = None + if copy_file_smb_info is not None: + file_creation_time = copy_file_smb_info.file_creation_time + file_last_write_time = None + if copy_file_smb_info is not None: + file_last_write_time = copy_file_smb_info.file_last_write_time + set_archive_attribute = None + if copy_file_smb_info is not None: + set_archive_attribute = copy_file_smb_info.set_archive_attribute + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.start_copy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + if file_permission_copy_mode is not None: + header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", file_permission_copy_mode, 'PermissionCopyModeType') + if ignore_read_only is not None: + header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') + if file_attributes is not None: + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + if file_creation_time is not None: + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + if file_last_write_time is not None: + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if set_archive_attribute is not None: + header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", 
set_archive_attribute, 'bool') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} + + def abort_copy(self, copy_id, timeout=None, lease_access_conditions=None, cls=None, **kwargs): + """Aborts a pending Copy File operation, and leaves a destination file + with zero length and full metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id + header of the original Copy File operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+        :type timeout: int
+        :param lease_access_conditions: Additional parameters for the
+         operation
+        :type lease_access_conditions:
+         ~azure.storage.fileshare.models.LeaseAccessConditions
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        lease_id = None
+        if lease_access_conditions is not None:
+            lease_id = lease_access_conditions.lease_id
+
+        comp = "copy"
+
+        # Construct URL
+        url = self.abort_copy.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if lease_id is not None:
+            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
+
+    def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, cls=None, **kwargs):
+        """Lists handles for a file.
+
+        :param marker: A string value that identifies the portion of the list
+         to be returned with the next list operation. The operation returns a
+         marker value within the response body if the list returned was not
+         complete. The marker value may then be used in a subsequent call to
+         request the next set of list items. The marker value is opaque to the
+         client.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return.
+         If the request does not specify maxresults, or specifies a value
+         greater than 5,000, the server will return up to 5,000 items.
+        :type maxresults: int
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime
+         value that, when present, specifies the share snapshot to query.
+        :type sharesnapshot: str
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: ListHandlesResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "listhandles"
+
+        # Construct URL
+        url = self.list_handles.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if maxresults is not None:
+            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if sharesnapshot is not None:
+            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/xml'
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('ListHandlesResponse', response)
+            header_dict = {
+                'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+
+        if cls:
+            return cls(response, deserialized, header_dict)
+
+        return deserialized
+    list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'}
+
+    def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, cls=None, **kwargs):
+        """Closes all handles open for the given file.
+
+        :param handle_id: Specifies the handle ID opened on the file or
+         directory to be closed. Asterisk (‘*’) is a wildcard that specifies
+         all handles.
+        :type handle_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param marker: A string value that identifies the portion of the list
+         to be returned with the next list operation. The operation returns a
+         marker value within the response body if the list returned was not
+         complete. The marker value may then be used in a subsequent call to
+         request the next set of list items. The marker value is opaque to the
+         client.
+ :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "forceclosehandles" + + # Construct URL + url = self.force_close_handles.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), + 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), + 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_service_operations.py new file mode 100644 index 0000000..cd43e83 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_service_operations.py @@ -0,0 +1,253 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class ServiceOperations(object):
+    """ServiceOperations operations.
+
+    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of the service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar restype: Constant value: "service".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.restype = "service"
+
+    def set_properties(self, storage_service_properties, timeout=None, cls=None, **kwargs):
+        """Sets properties for a storage account's File service endpoint,
+        including properties for Storage Analytics metrics and CORS
+        (Cross-Origin Resource Sharing) rules.
+
+        :param storage_service_properties: The StorageService properties.
+        :type storage_service_properties:
+         ~azure.storage.fileshare.models.StorageServiceProperties
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        comp = "properties"
+
+        # Construct URL
+        url = self.set_properties.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct body
+        body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters, body_content)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
+    set_properties.metadata = {'url': '/'}
+
+    def get_properties(self, timeout=None, cls=None, **kwargs):
+        """Gets the properties of a
storage account's File service, including + properties for Storage Analytics metrics and CORS (Cross-Origin + Resource Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: StorageServiceProperties or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.StorageServiceProperties + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "properties" + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('StorageServiceProperties', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_properties.metadata = {'url': '/'} + + def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, cls=None, **kwargs): + """The List Shares Segment operation returns a list of the shares and + share snapshots under the specified account. + + :param prefix: Filters the results to return only entries whose name + begins with the specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns a + marker value within the response body if the list returned was not + complete. The marker value may then be used in a subsequent call to + request the next set of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. + If the request does not specify maxresults, or specifies a value + greater than 5,000, the server will return up to 5,000 items. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets + to include in the response. 
+ :type include: list[str or + ~azure.storage.fileshare.models.ListSharesIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: ListSharesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListSharesResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "list" + + # Construct URL + url = self.list_shares_segment.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ListSharesResponse', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + list_shares_segment.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_share_operations.py new file mode 100644 index 0000000..5e1bbb6 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_share_operations.py @@ -0,0 +1,1315 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class ShareOperations(object):
+    """ShareOperations operations.
+
+    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of the service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    :ivar restype: Constant value: "share".
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+        self.restype = "share"
+
+    def create(self, timeout=None, metadata=None, quota=None, cls=None, **kwargs):
+        """Creates a new share under the specified account. If the share with the
+        same name already exists, the operation fails.
+
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see Setting
+         Timeouts for File Service Operations.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage
+         object.
+        :type metadata: str
+        :param quota: Specifies the maximum size of the share, in gigabytes.
+        :type quota: int
+        :param callable cls: A custom type or function that will be passed the
+         direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises:
+         :class:`StorageErrorException`
+        """
+        error_map = kwargs.pop('error_map', None)
+        # Construct URL
+        url = self.create.metadata['url']
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        if metadata is not None:
+            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+        if quota is not None:
+            header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1)
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+        # Construct and send request
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise models.StorageErrorException(response, self._deserialize)
+
+        if cls:
+            response_headers = {
+                'ETag': self._deserialize('str', response.headers.get('ETag')),
+                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+            }
+            return cls(response, None, response_headers)
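+    # Editorial note: an illustrative sketch, not generated code. Invoked
+    # directly, share creation through this operations class might look like:
+    #   ops.create(timeout=30, quota=100)
+    # which issues PUT {account-url}/{shareName}?restype=share with the
+    # x-ms-share-quota header set. `ops` is a hypothetical ShareOperations
+    # instance obtained from the generated client.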
create.metadata = {'url': '/{shareName}'} + + def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs): + """Returns all user-defined metadata and system properties for the + specified share or share snapshot. The data returned does not include + the share's list of files. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.get_properties.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')), + 'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')), + 'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')), + 'x-ms-share-provisioned-egress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')), + 'x-ms-share-next-allowed-quota-downgrade-time': 
self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{shareName}'} + + def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, lease_access_conditions=None, cls=None, **kwargs): + """Operation marks the specified share or share snapshot for deletion. The + share or share snapshot and any files contained within it are later + deleted during garbage collection. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param delete_snapshots: Specifies the option include to delete the + base share and all of its snapshots. Possible values include: + 'include' + :type delete_snapshots: str or + ~azure.storage.fileshare.models.DeleteSnapshotsOptionType + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + 
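+            # Editorial note: `cls` is the generated hook for surfacing raw
+            # response details. As an assumed example, a caller could capture
+            # the headers of the delete call like so:
+            #   headers = ops.delete(cls=lambda resp, result, hdrs: hdrs)
+            # `ops` is a hypothetical ShareOperations instance; illustrative
+            # only.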
response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{shareName}'} + + def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, sharesnapshot=None, request_id=None, cls=None, **kwargs): + """The Lease Share operation establishes and manages a lock on a share, or + the specified snapshot for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or + negative one (-1) for a lease that never expires. A non-infinite lease + can be between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "acquire" + + # Construct URL + url = self.acquire_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, 
header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + acquire_lease.metadata = {'url': '/{shareName}'} + + def release_lease(self, lease_id, timeout=None, sharesnapshot=None, request_id=None, cls=None, **kwargs): + """The Lease Share operation establishes and manages a lock on a share, or + the specified snapshot for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "release" + + # Construct URL + url = self.release_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + release_lease.metadata = {'url': '/{shareName}'} + + def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, sharesnapshot=None, request_id=None, cls=None, **kwargs): + """The Lease Share operation establishes and manages a lock on a share, or + the specified snapshot for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. + The File service returns 400 (Invalid request) if the proposed lease + ID is not in the correct format. See Guid Constructor (String) for a + list of valid GUID string formats. 
+ :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "change" + + # Construct URL + url = self.change_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + change_lease.metadata = {'url': '/{shareName}'} + + def renew_lease(self, lease_id, timeout=None, sharesnapshot=None, request_id=None, cls=None, **kwargs): + """The Lease Share operation establishes and manages a lock on a share, or + the specified snapshot for set and delete share operations. 
+ + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "lease" + action = "renew" + + # Construct URL + url = self.renew_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + renew_lease.metadata = {'url': '/{shareName}'} + + def break_lease(self, timeout=None, break_period=None, request_id=None, sharesnapshot=None, lease_access_conditions=None, cls=None, **kwargs): + """The Lease Share operation establishes and manages a lock on a share, or + the 
specified snapshot for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param break_period: For a break operation, proposed duration the + lease should continue before it is broken, in seconds, between 0 and + 60. This break period is only used if it is shorter than the time + remaining on the lease. If longer, the time remaining on the lease is + used. A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break period. + If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, + and an infinite lease breaks immediately. + :type break_period: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime + value that, when present, specifies the share snapshot to query. + :type sharesnapshot: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "lease" + action = "break" + + # Construct URL + url = self.break_lease.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + + # Construct headers + header_parameters = {} + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + 
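+            # Editorial note: x-ms-lease-time below reports the seconds
+            # remaining before the broken lease frees up. As an assumed
+            # example, a caller could read it through the cls hook:
+            #   secs = ops.break_lease(break_period=10,
+            #                          cls=lambda r, d, h: h['x-ms-lease-time'])
+            # `ops` is a hypothetical ShareOperations instance; illustrative
+            # only.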
response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), + 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + break_lease.metadata = {'url': '/{shareName}'} + + def create_snapshot(self, timeout=None, metadata=None, cls=None, **kwargs): + """Creates a read-only snapshot of a share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. + :type metadata: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "snapshot" + + # Construct URL + url = self.create_snapshot.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create_snapshot.metadata = {'url': '/{shareName}'} + + def 
create_permission(self, share_permission, timeout=None, cls=None, **kwargs): + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the + share level. + :type share_permission: + ~azure.storage.fileshare.models.SharePermission + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "filepermission" + + # Construct URL + url = self.create_permission.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct body + body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False) + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + create_permission.metadata = {'url': '/{shareName}'} + + def get_permission(self, file_permission_key, timeout=None, cls=None, **kwargs): + """Returns the permission (security descriptor) for a given key. + + :param file_permission_key: Key of the permission to be set for the + directory/file. + :type file_permission_key: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: SharePermission or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.SharePermission + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "filepermission" + + # Construct URL + url = self.get_permission.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SharePermission', response) + header_dict = { + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_permission.metadata = {'url': '/{shareName}'} + + def set_quota(self, timeout=None, quota=None, lease_access_conditions=None, cls=None, **kwargs): + """Sets quota for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param quota: Specifies the maximum size of the share, in gigabytes. 
+ :type quota: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "properties" + + # Construct URL + url = self.set_quota.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if quota is not None: + header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_quota.metadata = {'url': '/{shareName}'} + + def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, cls=None, **kwargs): + """Sets one or more user-defined name-value pairs for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage + object. 
+ :type metadata: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "metadata" + + # Construct URL + url = self.set_metadata.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{shareName}'} + + def get_access_policy(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs): + """Returns information about stored access policies specified on the + share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: list or the result of cls(response) + :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "acl" + + # Construct URL + url = self.get_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('[SignedIdentifier]', response) + header_dict = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_access_policy.metadata = {'url': '/{shareName}'} + + def set_access_policy(self, share_acl=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs): + """Sets a stored access policy for use with shared access signatures. + + :param share_acl: The ACL for the share. + :type share_acl: + list[~azure.storage.fileshare.models.SignedIdentifier] + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "acl" + + # Construct URL + url = self.set_access_policy.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct body + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} + if share_acl is not None: + body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) + else: + body_content = None + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_access_policy.metadata = {'url': '/{shareName}'} + + def get_statistics(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs): + """Retrieves statistics related to the share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. 
+ :type timeout: int + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.fileshare.models.LeaseAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: ShareStats or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareStats + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "stats" + + # Construct URL + url = self.get_statistics.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ShareStats', response) + header_dict = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_statistics.metadata = {'url': '/{shareName}'} + + def restore(self, timeout=None, request_id=None, deleted_share_name=None, deleted_share_version=None, cls=None, **kwargs): + """Restores a previously deleted Share. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for File Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param deleted_share_name: Specifies the name of the + previously-deleted share. + :type deleted_share_name: str + :param deleted_share_version: Specifies the version of the + previously-deleted share.
+ :type deleted_share_version: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "undelete" + + # Construct URL + url = self.restore.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if deleted_share_name is not None: + header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') + if deleted_share_version is not None: + header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + restore.metadata = {'url': '/{shareName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/version.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/version.py new file mode 100644 index 0000000..6ef707d --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +VERSION = "2020-02-10" + diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_lease.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_lease.py new file mode 100644 index 0000000..789e147 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_lease.py @@ -0,0 +1,237 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import uuid + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, TypeVar, TYPE_CHECKING +) + +from azure.core.tracing.decorator import distributed_trace + +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._generated.models import StorageErrorException +from ._generated.operations import FileOperations, ShareOperations + +if TYPE_CHECKING: + from datetime import datetime + ShareFileClient = TypeVar("ShareFileClient") + ShareClient = TypeVar("ShareClient") + + +class ShareLeaseClient(object): + """Creates a new ShareLeaseClient. + + This client provides lease operations on a ShareClient or ShareFileClient. + + :ivar str id: + The ID of the lease currently being maintained, or, if no lease has yet + been acquired, the ID that will be proposed for the new lease. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the file or share to lease. + :type client: ~azure.storage.fileshare.ShareFileClient or + ~azure.storage.fileshare.ShareClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. + """ + def __init__( + self, client, lease_id=None + ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs + # type: (Union[ShareFileClient, ShareClient], Optional[str]) -> None + self.id = lease_id or str(uuid.uuid4()) + self.last_modified = None + self.etag = None + if hasattr(client, 'file_name'): + self._client = client._client.file # type: ignore # pylint: disable=protected-access + self._snapshot = None + elif hasattr(client, 'share_name'): + self._client = client._client.share + self._snapshot = client.snapshot + else: + raise TypeError("Lease must use ShareFileClient or ShareClient.") + + def __enter__(self): + return self + + def __exit__(self, *args): + self.release() + + @distributed_trace + def acquire(self, **kwargs): + # type: (**Any) -> None + """Requests a new lease. This operation establishes and manages a lock on a + file or share for write and delete operations. If the file or share does not have an active lease, + the File or Share service creates a lease on the file or share and returns a new lease ID. If the + file has an active lease, you can only request a new lease using the active lease ID.
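+
+        Example -- a minimal usage sketch (``share_client`` is assumed to be
+        an already-constructed ``ShareClient``)::
+
+            lease = ShareLeaseClient(share_client)
+            lease.acquire(lease_duration=-1)  # -1 requests an infinite share lease
+            try:
+                ...  # write/delete operations guarded by the lease
+            finally:
+                lease.release()
+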
+ + :keyword int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be + between 15 and 60 seconds. A share lease duration cannot be changed + using renew or change. Default is -1 (infinite share lease). + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + try: + lease_duration = kwargs.pop('lease_duration', -1) + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace + def renew(self, **kwargs): + # type: (Any) -> None + """Renews the share lease. + + The share lease can be renewed if the lease ID specified in the + lease client matches that associated with the share. Note that + the lease may be renewed even if it has expired as long as the share + has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + .. versionadded:: 12.6.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + if isinstance(self._client, FileOperations): + raise TypeError("Lease renewal operations are only valid for ShareClient.") + try: + response = self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + sharesnapshot=self._snapshot, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def release(self, **kwargs): + # type: (Any) -> None + """Releases the lease. The lease may be released if the lease ID specified on the request matches + that associated with the share or file. Releasing the lease allows another client to immediately acquire + the lease for the share or file as soon as the release is complete. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + try: + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and + a new lease ID in x-ms-proposed-lease-id. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The File or Share service will raise an error + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :return: None + """ + try: + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = self._client.change_lease( + lease_id=self.id, + proposed_lease_id=proposed_lease_id, + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def break_lease(self, **kwargs): + # type: (Any) -> int + """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. An infinite lease breaks immediately. + + Once a lease is broken, it cannot be changed. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :keyword int lease_break_period: + The proposed duration, in seconds, that the share lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the share lease. If longer, the time remaining on the share lease is used. + A new share lease will not be available before the break period has + expired, but the share lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration share lease breaks after the remaining share lease + period elapses, and an infinite share lease breaks immediately. + + .. versionadded:: 12.6.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + try: + lease_break_period = kwargs.pop('lease_break_period', None) + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + if isinstance(self._client, ShareOperations): + kwargs['break_period'] = lease_break_period + if isinstance(self._client, FileOperations) and lease_break_period: + raise TypeError("Setting a lease break period is only applicable to Share leases.") + + response = self._client.break_lease( + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_models.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_models.py new file mode 100644 index 0000000..5a96c48 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_models.py @@ -0,0 +1,965 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from azure.core.paging import PageIterator +from ._parser import _parse_datetime_from_str +from ._shared.response_handlers import return_context_and_deserialized, process_storage_error +from ._shared.models import DictMixin, get_enum_value +from ._generated.models import StorageErrorException +from ._generated.models import Metrics as GeneratedMetrics +from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy +from ._generated.models import CorsRule as GeneratedCorsRule +from ._generated.models import ShareProtocolSettings as GeneratedShareProtocolSettings +from ._generated.models import ShareSmbSettings as GeneratedShareSmbSettings +from ._generated.models import SmbMultichannel as GeneratedSmbMultichannel +from ._generated.models import AccessPolicy as GenAccessPolicy +from ._generated.models import DirectoryItem + + +def _wrap_item(item): + if isinstance(item, DirectoryItem): + return {'name': item.name, 'is_directory': True} + return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} + + +class Metrics(GeneratedMetrics): + """A summary of request statistics grouped by API in hour or minute aggregates + for files. + + All required parameters must be populated in order to send to Azure. + + :keyword str version: The version of Storage Analytics to configure. + :keyword bool enabled: Required. Indicates whether metrics are enabled for the + File service. + :keyword bool include_apis: Indicates whether metrics should generate summary + statistics for called API operations. + :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should + persist. + """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.enabled = kwargs.get('enabled', False) + self.include_apis = kwargs.get('include_apis') + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + enabled=generated.enabled, + include_apis=generated.include_apis, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class RetentionPolicy(GeneratedRetentionPolicy): + """The retention policy which determines how long the associated data should + persist. + + All required parameters must be populated in order to send to Azure. + + :param bool enabled: Required. Indicates whether a retention policy is enabled + for the storage service. + :param int days: Indicates the number of days that metrics or logging or + soft-deleted data should be retained. All data older than this value will + be deleted. + """ + + def __init__(self, enabled=False, days=None): + self.enabled = enabled + self.days = days + if self.enabled and (self.days is None): + raise ValueError("If policy is enabled, 'days' must be specified.") + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + days=generated.days, + ) + + +class CorsRule(GeneratedCorsRule): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain.
Web browsers implement a + security restriction known as same-origin policy that prevents a web page + from calling APIs in a different domain; CORS provides a secure way to + allow one domain (the origin domain) to call APIs in another domain. + + All required parameters must be populated in order to send to Azure. + + :param list(str) allowed_origins: + A list of origin domains that will be allowed via CORS, or "*" to allow + all domains. The list must contain at least one entry. Limited to 64 + origin domains. Each allowed origin can have up to 256 characters. + :param list(str) allowed_methods: + A list of HTTP methods that are allowed to be executed by the origin. + The list must contain at least one entry. For Azure Storage, + permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. + :keyword list(str) allowed_headers: + Defaults to an empty list. A list of headers allowed to be part of + the cross-origin request. Limited to 64 defined headers and two prefixed + headers. Each header can be up to 256 characters. + :keyword list(str) exposed_headers: + Defaults to an empty list. A list of response headers to expose to CORS + clients. Limited to 64 defined headers and two prefixed headers. Each + header can be up to 256 characters. + :keyword int max_age_in_seconds: + The number of seconds that the client/browser should cache a + preflight response. + """ + + def __init__(self, allowed_origins, allowed_methods, **kwargs): + self.allowed_origins = ','.join(allowed_origins) + self.allowed_methods = ','.join(allowed_methods) + self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) + self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) + self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) + + @classmethod + def _from_generated(cls, generated): + return cls( + [generated.allowed_origins], + [generated.allowed_methods], + allowed_headers=[generated.allowed_headers], + exposed_headers=[generated.exposed_headers], + max_age_in_seconds=generated.max_age_in_seconds, + ) + + +class ShareSmbSettings(GeneratedShareSmbSettings): + """ Settings for the SMB protocol. + + :param SmbMultichannel multichannel: Required. Sets the multichannel settings. + """ + def __init__(self, multichannel): + self.multichannel = multichannel + + +class SmbMultichannel(GeneratedSmbMultichannel): + """ Settings for Multichannel. + + :param bool enabled: Required. If SMB Multichannel is enabled. + """ + def __init__(self, enabled): + self.enabled = enabled + + +class ShareProtocolSettings(GeneratedShareProtocolSettings): + """Protocol Settings class used by the set and get service properties methods in the share service. + + Contains protocol properties of the share service such as the SMB setting of the share service. + + :param SmbSettings smb: Required. Sets SMB settings. + """ + def __init__(self, smb): + self.smb = smb + + @classmethod + def _from_generated(cls, generated): + return cls( + smb=generated.smb) + + +class AccessPolicy(GenAccessPolicy): + """Access Policy class used by the set and get acl methods in each service. + + A stored access policy can specify the start time, expiry time, and + permissions for the Shared Access Signatures with which it's associated. + Depending on how you want to control access to your resource, you can + specify all of these parameters within the stored access policy, and omit + them from the URL for the Shared Access Signature.
Doing so permits you to + modify the associated signature's behavior at any time, as well as to revoke + it. Or you can specify one or more of the access policy parameters within + the stored access policy, and the others on the URL. Finally, you can + specify all of the parameters on the URL. In this case, you can use the + stored access policy to revoke the signature, but not to modify its behavior. + + Together the Shared Access Signature and the stored access policy must + include all fields required to authenticate the signature. If any required + fields are missing, the request will fail. Likewise, if a field is specified + both in the Shared Access Signature URL and in the stored access policy, the + request will fail with status code 400 (Bad Request). + + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.fileshare.FileSasPermissions or + ~azure.storage.fileshare.ShareSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + """ + def __init__(self, permission=None, expiry=None, start=None): + self.start = start + self.expiry = expiry + self.permission = permission + + +class LeaseProperties(DictMixin): + """File Lease Properties. + + :ivar str status: + The lease status of the file or share. Possible values: locked|unlocked + :ivar str state: + Lease state of the file or share. Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a file or share is leased, specifies whether the lease is of infinite or fixed duration. + """ + + def __init__(self, **kwargs): + self.status = get_enum_value(kwargs.get('x-ms-lease-status')) + self.state = get_enum_value(kwargs.get('x-ms-lease-state')) + self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) + + @classmethod + def _from_generated(cls, generated): + lease = cls() + lease.status = get_enum_value(generated.properties.lease_status) + lease.state = get_enum_value(generated.properties.lease_state) + lease.duration = get_enum_value(generated.properties.lease_duration) + return lease + + +class ContentSettings(DictMixin): + """Used to store the content settings of a file. + + :param str content_type: + The content type specified for the file. If no content type was + specified, the default content type is application/octet-stream. + :param str content_encoding: + If the content_encoding has previously been set + for the file, that value is stored. 
+ :param str content_language: + If the content_language has previously been set + for the file, that value is stored. + :param str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the file, that value is stored. + :param str cache_control: + If the cache_control has previously been set for + the file, that value is stored. + :param str content_md5: + If the content_md5 has been set for the file, this response + header is stored so that the client can check for message content + integrity. + """ + + def __init__( + self, content_type=None, content_encoding=None, + content_language=None, content_disposition=None, + cache_control=None, content_md5=None, **kwargs): + + self.content_type = content_type or kwargs.get('Content-Type') + self.content_encoding = content_encoding or kwargs.get('Content-Encoding') + self.content_language = content_language or kwargs.get('Content-Language') + self.content_md5 = content_md5 or kwargs.get('Content-MD5') + self.content_disposition = content_disposition or kwargs.get('Content-Disposition') + self.cache_control = cache_control or kwargs.get('Cache-Control') + + @classmethod + def _from_generated(cls, generated): + settings = cls() + settings.content_type = generated.properties.content_type or None + settings.content_encoding = generated.properties.content_encoding or None + settings.content_language = generated.properties.content_language or None + settings.content_md5 = generated.properties.content_md5 or None + settings.content_disposition = generated.properties.content_disposition or None + settings.cache_control = generated.properties.cache_control or None + return settings + + +class ShareProperties(DictMixin): + """Share's properties class. + + :ivar str name: + The name of the share. + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the share was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar int quota: + The allocated quota. + :ivar dict metadata: A dict with name-value pairs to associate with the + share as metadata. + :ivar str snapshot: + Snapshot of the share. + :ivar bool deleted: + To indicate if this share is deleted or not. + This is a service returned value, and the value will be set when listing shares including deleted ones. + :ivar datetime deleted_time: + To indicate the deleted time of the deleted share. + This is a service returned value, and the value will be set when listing shares including deleted ones. + :ivar str version: + To indicate the version of the deleted share. + This is a service returned value, and the value will be set when listing shares including deleted ones. + :ivar int remaining_retention_days: + To indicate how many remaining days the deleted share will be kept. + This is a service returned value, and the value will be set when listing shares including deleted ones.
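+
+    Example -- a minimal sketch (``share_client`` is assumed to be an
+    existing ``ShareClient``, whose ``get_share_properties()`` returns an
+    instance of this class)::
+
+        props = share_client.get_share_properties()
+        print(props.name, props.quota, props.last_modified)
+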
+ """ + + def __init__(self, **kwargs): + self.name = None + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.quota = kwargs.get('x-ms-share-quota') + self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time') + self.metadata = kwargs.get('metadata') + self.snapshot = None + self.deleted = None + self.deleted_time = None + self.version = None + self.remaining_retention_days = None + self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps') + self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps') + self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops') + self.lease = LeaseProperties(**kwargs) + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.last_modified = generated.properties.last_modified + props.etag = generated.properties.etag + props.quota = generated.properties.quota + props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time + props.metadata = generated.metadata + props.snapshot = generated.snapshot + props.deleted = generated.deleted + props.deleted_time = generated.properties.deleted_time + props.version = generated.version + props.remaining_retention_days = generated.properties.remaining_retention_days + props.provisioned_egress_mbps = generated.properties.provisioned_egress_mbps + props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_mbps + props.provisioned_iops = generated.properties.provisioned_iops + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + return props + + +class SharePropertiesPaged(PageIterator): + """An iterable of Share properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.fileshare.ShareProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only shares whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of share names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(SharePropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + prefix=self.prefix, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access + return self._response.next_marker or None, self.current_page + + +class Handle(DictMixin): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :keyword str handle_id: Required. XSMB service handle ID + :keyword str path: Required. File or directory name including full path starting + from share root + :keyword str file_id: Required. FileId uniquely identifies the file or + directory. + :keyword str parent_id: ParentId uniquely identifies the parent directory of the + object. + :keyword str session_id: Required. SMB session ID in context of which the file + handle was opened + :keyword str client_ip: Required. Client IP that opened the handle + :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened + the handle has last been reconnected. (UTC) + :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC) + """ + + def __init__(self, **kwargs): + self.id = kwargs.get('handle_id') + self.path = kwargs.get('path') + self.file_id = kwargs.get('file_id') + self.parent_id = kwargs.get('parent_id') + self.session_id = kwargs.get('session_id') + self.client_ip = kwargs.get('client_ip') + self.open_time = kwargs.get('open_time') + self.last_reconnect_time = kwargs.get('last_reconnect_time') + + @classmethod + def _from_generated(cls, generated): + handle = cls() + handle.id = generated.handle_id + handle.path = generated.path + handle.file_id = generated.file_id + handle.parent_id = generated.parent_id + handle.session_id = generated.session_id + handle.client_ip = generated.client_ip + handle.open_time = generated.open_time + handle.last_reconnect_time = generated.last_reconnect_time + return handle + + +class HandlesPaged(PageIterator): + """An iterable of Handles. + + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. 
+    :vartype current_page: list(~azure.storage.fileshare.Handle)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param int results_per_page: The maximum number of handles to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(self, command, results_per_page=None, continuation_token=None):
+        super(HandlesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.current_page = [Handle._from_generated(h) for h in self._response.handle_list]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class DirectoryProperties(DictMixin):
+    """Directory's properties class.
+
+    :ivar str name:
+        The name of the directory.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the directory was modified.
+    :ivar str etag:
+        The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar bool server_encrypted:
+        Whether encryption is enabled.
+    :ivar dict metadata: A dict with name-value pairs to associate with the
+        directory as metadata.
+    :ivar change_time: Change time for the directory.
+    :vartype change_time: str or ~datetime.datetime
+    :ivar creation_time: Creation time for the directory.
+    :vartype creation_time: str or ~datetime.datetime
+    :ivar last_write_time: Last write time for the directory.
+    :vartype last_write_time: str or ~datetime.datetime
+    :ivar file_attributes:
+        The file system attributes for files and directories.
+    :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
+    :ivar permission_key: Key of the permission to be set for the
+        directory/file.
+    :vartype permission_key: str
+    :ivar file_id: Required. FileId uniquely identifies the file or
+        directory.
+    :vartype file_id: str
+    :ivar parent_id: ParentId uniquely identifies the parent directory of the
+        object.
+    :vartype parent_id: str
+    """
+
+    def __init__(self, **kwargs):
+        self.name = None
+        self.last_modified = kwargs.get('Last-Modified')
+        self.etag = kwargs.get('ETag')
+        self.server_encrypted = kwargs.get('x-ms-server-encrypted')
+        self.metadata = kwargs.get('metadata')
+        self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time'))
+        self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time'))
+        self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time'))
+        self.file_attributes = kwargs.get('x-ms-file-attributes')
+        self.permission_key = kwargs.get('x-ms-file-permission-key')
+        self.file_id = kwargs.get('x-ms-file-id')
+        self.parent_id = kwargs.get('x-ms-file-parent-id')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        props = cls()
+        props.name = generated.name
+        props.last_modified = generated.properties.last_modified
+        props.etag = generated.properties.etag
+        props.server_encrypted = generated.properties.server_encrypted
+        props.metadata = generated.metadata
+        return props
+
+
+class DirectoryPropertiesPaged(PageIterator):
+    """An iterable for the contents of a directory.
+
+    This iterable will yield dicts for the contents of the directory. The dicts
+    will have the keys 'name' (str) and 'is_directory' (bool).
+    Items that are files (is_directory=False) will have an additional 'content_length' key.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(dict(str, Any))
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only directories whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of directory and file names to retrieve
+        per call.
+    :param str continuation_token: An opaque continuation token.
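+
+    Example (a minimal sketch; assumes ``share`` is an authenticated
+    ``ShareClient``, whose ``list_directories_and_files`` method returns an
+    iterator driven by this class)::
+
+        for item in share.list_directories_and_files():
+            kind = 'dir' if item['is_directory'] else 'file'
+            print(kind, item['name'])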
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(DirectoryPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + prefix=self.prefix, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] + self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) + return self._response.next_marker or None, self.current_page + + +class FileProperties(DictMixin): + """File's properties class. + + :ivar str name: + The name of the file. + :ivar str path: + The path of the file. + :ivar str share: + The name of share. + :ivar str snapshot: + File snapshot. + :ivar int content_length: + Size of file in bytes. + :ivar dict metadata: A dict with name_value pairs to associate with the + file as metadata. + :ivar str file_type: + Type of the file. + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the file was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar int size: + Size of file in bytes. + :ivar str content_range: + The range of bytes. + :ivar bool server_encrypted: + Whether encryption is enabled. + :ivar copy: + The copy properties. + :vartype copy: ~azure.storage.fileshare.CopyProperties + :ivar content_settings: + The content settings for the file. 
+ :vartype content_settings: ~azure.storage.fileshare.ContentSettings + """ + + def __init__(self, **kwargs): + self.name = kwargs.get('name') + self.path = None + self.share = None + self.snapshot = None + self.content_length = kwargs.get('Content-Length') + self.metadata = kwargs.get('metadata') + self.file_type = kwargs.get('x-ms-type') + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.size = kwargs.get('Content-Length') + self.content_range = kwargs.get('Content-Range') + self.server_encrypted = kwargs.get('x-ms-server-encrypted') + self.copy = CopyProperties(**kwargs) + self.content_settings = ContentSettings(**kwargs) + self.lease = LeaseProperties(**kwargs) + self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) + self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) + self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) + self.file_attributes = kwargs.get('x-ms-file-attributes') + self.permission_key = kwargs.get('x-ms-file-permission-key') + self.file_id = kwargs.get('x-ms-file-id') + self.parent_id = kwargs.get('x-ms-file-parent-id') + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.content_length = generated.properties.content_length + props.metadata = generated.properties.metadata + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + return props + + +class CopyProperties(DictMixin): + """File Copy Properties. + + :ivar str id: + String identifier for the last attempted Copy File operation where this file + was the destination file. This header does not appear if this file has never + been the destination in a Copy File operation, or if this file has been + modified after a concluded Copy File operation. + :ivar str source: + URL up to 2 KB in length that specifies the source file used in the last attempted + Copy File operation where this file was the destination file. This header does not + appear if this file has never been the destination in a Copy File operation, or if + this file has been modified after a concluded Copy File operation. + :ivar str status: + State of the copy operation identified by Copy ID, with these values: + success: + Copy completed successfully. + pending: + Copy is in progress. Check copy_status_description if intermittent, + non-fatal errors impede copy progress but don't cause failure. + aborted: + Copy was ended by Abort Copy File. + failed: + Copy failed. See copy_status_description for failure details. + :ivar str progress: + Contains the number of bytes copied and the total bytes in the source in the last + attempted Copy File operation where this file was the destination file. Can show + between 0 and Content-Length bytes copied. + :ivar datetime completion_time: + Conclusion time of the last attempted Copy File operation where this file was the + destination file. This value can specify the time of a completed, aborted, or + failed copy attempt. + :ivar str status_description: + Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal + or non-fatal copy operation failure. + :ivar bool incremental_copy: + Copies the snapshot of the source file to a destination file. 
+        The snapshot is copied such that only the differential changes since
+        the previously copied snapshot are transferred to the destination.
+    :ivar datetime destination_snapshot:
+        Included if the file is an incremental copy or an incremental copy snapshot,
+        if x-ms-copy-status is success. Snapshot time of the last successful
+        incremental copy snapshot for this file.
+    """
+
+    def __init__(self, **kwargs):
+        self.id = kwargs.get('x-ms-copy-id')
+        self.source = kwargs.get('x-ms-copy-source')
+        self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
+        self.progress = kwargs.get('x-ms-copy-progress')
+        self.completion_time = kwargs.get('x-ms-copy-completion-time')
+        self.status_description = kwargs.get('x-ms-copy-status-description')
+        self.incremental_copy = kwargs.get('x-ms-incremental-copy')
+        self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        copy = cls()
+        copy.id = generated.properties.copy_id or None
+        copy.status = get_enum_value(generated.properties.copy_status) or None
+        copy.source = generated.properties.copy_source or None
+        copy.progress = generated.properties.copy_progress or None
+        copy.completion_time = generated.properties.copy_completion_time or None
+        copy.status_description = generated.properties.copy_status_description or None
+        copy.incremental_copy = generated.properties.incremental_copy or None
+        copy.destination_snapshot = generated.properties.destination_snapshot or None
+        return copy
+
+
+class FileSasPermissions(object):
+    """FileSasPermissions class to be used with
+    generating shared access signature operations.
+
+    :param bool read:
+        Read the content, properties, metadata. Use the file as the source of a copy
+        operation.
+    :param bool create:
+        Create a new file or copy a file to a new file.
+    :param bool write:
+        Create or write content, properties, metadata. Resize the file. Use the file
+        as the destination of a copy operation within the same account.
+    :param bool delete:
+        Delete the file.
+    """
+    def __init__(self, read=False, create=False, write=False, delete=False):
+        self.read = read
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self._str = (('r' if self.read else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a FileSasPermissions from a string.
+
+        To specify read, create, write, or delete permissions you need only to
+        include the first letter of the word in the string. E.g. for read and
+        create permissions, you would provide the string "rc".
+
+        :param str permission: The string which dictates the read, create,
+            write, or delete permissions.
+        :return: A FileSasPermissions object
+        :rtype: ~azure.storage.fileshare.FileSasPermissions
+        """
+        p_read = 'r' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+
+        parsed = cls(p_read, p_create, p_write, p_delete)
+
+        return parsed
+
+
+class ShareSasPermissions(object):
+    """ShareSasPermissions class to be used with
+    generating shared access signature and access policy operations.
+
+    :param bool read:
+        Read the content, properties or metadata of any file in the share. Use any
+        file in the share as the source of a copy operation.
+    :param bool write:
+        For any file in the share, create or write content, properties or metadata.
+        Resize the file.
+        Use the file as the destination of a copy operation within
+        the same account.
+        Note: You cannot grant permissions to read or write share properties or
+        metadata with a service SAS. Use an account SAS instead.
+    :param bool delete:
+        Delete any file in the share.
+        Note: You cannot grant permissions to delete a share with a service SAS. Use
+        an account SAS instead.
+    :param bool list:
+        List files and directories in the share.
+    """
+    def __init__(self, read=False, write=False, delete=False, list=False):  # pylint: disable=redefined-builtin
+        self.read = read
+        self.write = write
+        self.delete = delete
+        self.list = list
+        self._str = (('r' if self.read else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('l' if self.list else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a ShareSasPermissions from a string.
+
+        To specify read, write, delete, or list permissions you need only to
+        include the first letter of the word in the string. E.g. for read and
+        write permissions, you would provide the string "rw".
+
+        :param str permission: The string which dictates the read, write,
+            delete, or list permissions.
+        :return: A ShareSasPermissions object
+        :rtype: ~azure.storage.fileshare.ShareSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+
+        parsed = cls(p_read, p_write, p_delete, p_list)
+
+        return parsed
+
+
+class NTFSAttributes(object):
+    """
+    Valid set of attributes to set on a file or directory.
+    When setting attributes on a directory, 'Directory' should always be enabled,
+    except when clearing all attributes by setting 'None'.
+
+    :ivar bool read_only:
+        Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE
+    :ivar bool hidden:
+        Enable/disable 'Hidden' attribute for DIRECTORY or FILE
+    :ivar bool system:
+        Enable/disable 'System' attribute for DIRECTORY or FILE
+    :ivar bool none:
+        Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY
+    :ivar bool directory:
+        Enable/disable 'Directory' attribute for DIRECTORY
+    :ivar bool archive:
+        Enable/disable 'Archive' attribute for DIRECTORY or FILE
+    :ivar bool temporary:
+        Enable/disable 'Temporary' attribute for FILE
+    :ivar bool offline:
+        Enable/disable 'Offline' attribute for DIRECTORY or FILE
+    :ivar bool not_content_indexed:
+        Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE
+    :ivar bool no_scrub_data:
+        Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE
+    """
+    def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False,
+                 temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False):
+        self.read_only = read_only
+        self.hidden = hidden
+        self.system = system
+        self.none = none
+        self.directory = directory
+        self.archive = archive
+        self.temporary = temporary
+        self.offline = offline
+        self.not_content_indexed = not_content_indexed
+        self.no_scrub_data = no_scrub_data
+        self._str = (('ReadOnly|' if self.read_only else '') +
+                     ('Hidden|' if self.hidden else '') +
+                     ('System|' if self.system else '') +
+                     ('None|' if self.none else '') +
+                     ('Directory|' if self.directory else '') +
+                     ('Archive|' if self.archive else '') +
+                     ('Temporary|' if self.temporary else '') +
+                     ('Offline|' if self.offline else '') +
+                     ('NotContentIndexed|' if self.not_content_indexed else '') +
+                     ('NoScrubData|' if self.no_scrub_data else ''))
+
+    def __str__(self):
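+        # A brief note on the logic below: __init__ appended a '|' after every
+        # enabled flag (e.g. "ReadOnly|Hidden|"), so the trailing separator is
+        # stripped before the string form is returned.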
+        concatenated_params = self._str
+        return concatenated_params.strip('|')
+
+    @classmethod
+    def from_string(cls, string):
+        """Create an NTFSAttributes from a string.
+
+        To specify attributes, pass in a string with the desired values
+        separated by '|', e.g. "ReadOnly|Hidden|System"
+
+        :param str string: The string which dictates the attributes.
+        :return: An NTFSAttributes object
+        :rtype: ~azure.storage.fileshare.NTFSAttributes
+        """
+        read_only = "ReadOnly" in string
+        hidden = "Hidden" in string
+        system = "System" in string
+        none = "None" in string
+        directory = "Directory" in string
+        archive = "Archive" in string
+        temporary = "Temporary" in string
+        offline = "Offline" in string
+        not_content_indexed = "NotContentIndexed" in string
+        no_scrub_data = "NoScrubData" in string
+
+        parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed,
+                     no_scrub_data)
+        parsed._str = string  # pylint: disable=protected-access
+        return parsed
+
+
+def service_properties_deserialize(generated):
+    """Deserialize a ServiceProperties object into a dict.
+    """
+    return {
+        'hour_metrics': Metrics._from_generated(generated.hour_metrics),  # pylint: disable=protected-access
+        'minute_metrics': Metrics._from_generated(generated.minute_metrics),  # pylint: disable=protected-access
+        'cors': [CorsRule._from_generated(cors) for cors in generated.cors],  # pylint: disable=protected-access
+        'protocol': ShareProtocolSettings._from_generated(generated.protocol),  # pylint: disable=protected-access
+    }
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_parser.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_parser.py
new file mode 100644
index 0000000..db7cab5
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_parser.py
@@ -0,0 +1,42 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from datetime import datetime, timedelta
+
+_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time'
+_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. ' \
+                            'file_permission should be <=8KB; otherwise, please use file_permission_key'
+
+
+def _get_file_permission(file_permission, file_permission_key, default_permission):
+    # If file_permission and file_permission_key are both empty, fall back to
+    # default_permission. A file_permission must be <= 8KB; anything larger
+    # has to be passed by reference via file_permission_key.
+    if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024:
+        raise ValueError(_FILE_PERMISSION_TOO_LONG)
+
+    if not file_permission:
+        if not file_permission_key:
+            return default_permission
+        return None
+
+    if not file_permission_key:
+        return file_permission
+
+    raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS)
+
+
+def _parse_datetime_from_str(string_datetime):
+    if not string_datetime:
+        return None
+    dt, _, us = string_datetime.partition(".")
+    dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
+    us = int(us[:-2])  # seven fractional digits plus 'Z'; strip the trailing '0Z' to get microseconds
+    datetime_obj = dt + timedelta(microseconds=us)
+    return datetime_obj
+
+
+def _datetime_to_str(datetime_obj):
+    return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z'
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_serialize.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_serialize.py
new file mode 100644
index 0000000..fc02b90
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_serialize.py
@@ -0,0 +1,113 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from azure.core import MatchConditions
+
+from ._parser import _datetime_to_str, _get_file_permission
+from ._generated.models import SourceModifiedAccessConditions, LeaseAccessConditions, CopyFileSmbInfo
+
+
+_SUPPORTED_API_VERSIONS = [
+    '2019-02-02',
+    '2019-07-07',
+    '2019-12-12',
+    '2020-02-10',
+]
+
+
+def _get_match_headers(kwargs, match_param, etag_param):
+    # type: (Dict[str, Any], str, str) -> Tuple[Optional[str], Optional[str]]
+    # TODO: extract this method to the shared folder and add some comments, so that blob, datalake and share can all use it.
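+    # A quick map of how the generic MatchConditions translate to HTTP
+    # precondition headers in the branches below:
+    #   IfNotModified -> If-Match: <etag>       (proceed only if unchanged)
+    #   IfPresent     -> If-Match: *            (proceed only if it exists)
+    #   IfModified    -> If-None-Match: <etag>  (proceed only if changed)
+    #   IfMissing     -> If-None-Match: *       (proceed only if it does not exist)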
+    if_match = None
+    if_none_match = None
+    match_condition = kwargs.pop(match_param, None)
+    if match_condition == MatchConditions.IfNotModified:
+        if_match = kwargs.pop(etag_param, None)
+        if not if_match:
+            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+    elif match_condition == MatchConditions.IfPresent:
+        if_match = '*'
+    elif match_condition == MatchConditions.IfModified:
+        if_none_match = kwargs.pop(etag_param, None)
+        if not if_none_match:
+            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+    elif match_condition == MatchConditions.IfMissing:
+        if_none_match = '*'
+    elif match_condition is None:
+        if etag_param in kwargs:
+            raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param))
+    else:
+        raise TypeError("Invalid match condition: {}".format(match_condition))
+    return if_match, if_none_match
+
+
+def get_source_conditions(kwargs):
+    # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
+    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+    return SourceModifiedAccessConditions(
+        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
+        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
+        source_if_match=if_match or kwargs.pop('source_if_match', None),
+        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
+    )
+
+
+def get_access_conditions(lease):
+    # type: (Optional[Union[ShareLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
+    try:
+        lease_id = lease.id  # type: ignore
+    except AttributeError:
+        lease_id = lease  # type: ignore
+    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+
+def get_smb_properties(kwargs):
+    # type: (Dict[str, Any]) -> Dict[str, Any]
+    ignore_read_only = kwargs.pop('ignore_read_only', None)
+    set_archive_attribute = kwargs.pop('set_archive_attribute', None)
+    file_permission = kwargs.pop('file_permission', None)
+    file_permission_key = kwargs.pop('permission_key', None)
+    file_attributes = kwargs.pop('file_attributes', None)
+    file_creation_time = kwargs.pop('file_creation_time', None) or ""
+    file_last_write_time = kwargs.pop('file_last_write_time', None) or ""
+
+    file_permission_copy_mode = None
+    file_permission = _get_file_permission(file_permission, file_permission_key, None)
+
+    if file_permission:
+        if file_permission.lower() == "source":
+            file_permission = None
+            file_permission_copy_mode = "source"
+        else:
+            file_permission_copy_mode = "override"
+    elif file_permission_key:
+        if file_permission_key.lower() == "source":
+            file_permission_key = None
+            file_permission_copy_mode = "source"
+        else:
+            file_permission_copy_mode = "override"
+    return {
+        'file_permission': file_permission,
+        'file_permission_key': file_permission_key,
+        'copy_file_smb_info': CopyFileSmbInfo(
+            file_permission_copy_mode=file_permission_copy_mode,
+            ignore_read_only=ignore_read_only,
+            file_attributes=file_attributes,
+            file_creation_time=_datetime_to_str(file_creation_time),
+            file_last_write_time=_datetime_to_str(file_last_write_time),
+            set_archive_attribute=set_archive_attribute
+        )
+    }
+
+
+def get_api_version(kwargs, default):
+    # type: (Dict[str, Any], str) -> str
+    api_version = kwargs.pop('api_version', None)
+    if api_version and api_version not in _SUPPORTED_API_VERSIONS:
+        versions = '\n'.join(_SUPPORTED_API_VERSIONS)
+        raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) + return api_version or default diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_client.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_client.py new file mode 100644 index 0000000..111e91c --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_client.py @@ -0,0 +1,806 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Optional, Union, Dict, Any, Iterable, TYPE_CHECKING +) +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, serialize_iso +from ._shared.response_handlers import ( + return_response_headers, + process_storage_error, + return_headers_and_deserialized) +from ._generated import AzureFileStorage +from ._generated.version import VERSION +from ._generated.models import ( + StorageErrorException, + SignedIdentifier, + DeleteSnapshotsOptionType, + SharePermission) +from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission +from ._serialize import get_api_version, get_access_conditions +from ._directory_client import ShareDirectoryClient +from ._file_client import ShareFileClient +from ._lease import ShareLeaseClient + +if TYPE_CHECKING: + from ._models import ShareProperties, AccessPolicy + + +class ShareClient(StorageAccountHostsMixin): + """A client to interact with a specific share, although that share may not yet exist. + + For operations relating to a specific directory or file in this share, the clients for + those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the share, + use the :func:`from_share_url` classmethod. + :param share_name: + The name of the share with which to interact. + :type share_name: str + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. 
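+
+    Example (a minimal sketch; the account URL, share name, and credential
+    shown are placeholder values)::
+
+        share = ShareClient(
+            account_url="https://<my-account>.file.core.windows.net",
+            share_name="myshare",
+            credential="<sas-token-or-account-key>")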
+ """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not share_name: + raise ValueError("Please specify a share name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + if hasattr(credential, 'get_token'): + raise ValueError("Token credentials not supported by the File service.") + + path_snapshot = None + path_snapshot, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + self.share_name = share_name + self._query_str, credential = self._format_query_string( + sas_token, credential, share_snapshot=self.snapshot) + super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + + @classmethod + def from_share_url(cls, share_url, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> ShareClient + """ + :param str share_url: The full URI to the share. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :returns: A share client. + :rtype: ~azure.storage.fileshare.ShareClient + """ + try: + if not share_url.lower().startswith('http'): + share_url = "https://" + share_url + except AttributeError: + raise ValueError("Share URL must be a string.") + parsed_url = urlparse(share_url.rstrip('/')) + if not (parsed_url.path and parsed_url.netloc): + raise ValueError("Invalid URL: {}".format(share_url)) + + share_path = parsed_url.path.lstrip('/').split('/') + account_path = "" + if len(share_path) > 1: + account_path = "/" + "/".join(share_path[:-1]) + account_url = "{}://{}{}?{}".format( + parsed_url.scheme, + parsed_url.netloc.rstrip('/'), + account_path, + parsed_url.query) + + share_name = unquote(share_path[-1]) + path_snapshot, _ = parse_query(parsed_url.query) + if snapshot: + try: + path_snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + path_snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + path_snapshot = snapshot + + if not share_name: + raise ValueError("Invalid URL. 
Please provide a URL with a valid share name") + return cls(account_url, share_name, path_snapshot, credential, **kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + share_name = self.share_name + if isinstance(share_name, six.text_type): + share_name = share_name.encode('UTF-8') + return "{}://{}/{}{}".format( + self.scheme, + hostname, + quote(share_name), + self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + share_name, # type: str + snapshot=None, # type: Optional[str] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> ShareClient + """Create ShareClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param share_name: The name of the share. + :type share_name: str + :param str snapshot: + The optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :returns: A share client. + :rtype: ~azure.storage.fileshare.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share_client_from_conn_string] + :end-before: [END create_share_client_from_conn_string] + :language: python + :dedent: 8 + :caption: Gets the share client from connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs) + + def get_directory_client(self, directory_path=None): + # type: (Optional[str]) -> ShareDirectoryClient + """Get a client to interact with the specified directory. + The directory need not already exist. + + :param str directory_path: + Path to the specified directory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, + _location_mode=self._location_mode) + + def get_file_client(self, file_path): + # type: (str) -> ShareFileClient + """Get a client to interact with the specified file. + The file need not already exist. + + :param str file_path: + Path to the specified file. + :returns: A File Client. 
+ :rtype: ~azure.storage.fileshare.ShareFileClient + """ + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareFileClient( + self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode) + + @distributed_trace + def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): + # type: (int, Optional[str], **Any) -> ShareLeaseClient + """Requests a new lease. + + If the share does not have an active lease, the Share + Service creates a lease on the share and returns a new lease. + + .. versionadded:: 12.6.0 + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Share Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A ShareLeaseClient object. + :rtype: ~azure.storage.fileshare.ShareLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START acquire_and_release_lease_on_share] + :end-before: [END acquire_and_release_lease_on_share] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a share. + """ + kwargs['lease_duration'] = lease_duration + lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore + lease.acquire(**kwargs) + return lease + + @distributed_trace + def create_share(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new Share under the account. If a share with the + same name already exists, the operation fails. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int quota: + The quota to be allotted. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share] + :end-before: [END create_share] + :language: python + :dedent: 8 + :caption: Creates a file share. + """ + metadata = kwargs.pop('metadata', None) + quota = kwargs.pop('quota', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + + try: + return self._client.share.create( # type: ignore + timeout=timeout, + metadata=metadata, + quota=quota, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def create_snapshot( # type: ignore + self, + **kwargs # type: Optional[Any] + ): + # type: (...) -> Dict[str, Any] + """Creates a snapshot of the share. + + A snapshot is a read-only version of a share that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a share as it appears at a moment in time. 
+
+        A snapshot of a share has the same name as the base share from which the snapshot
+        is taken, with a DateTime value appended to indicate the time at which the
+        snapshot was taken.
+
+        :keyword dict(str,str) metadata:
+            Name-value pairs associated with the share as metadata.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Share-updated property dict (Snapshot ID, Etag, and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START create_share_snapshot]
+                :end-before: [END create_share_snapshot]
+                :language: python
+                :dedent: 12
+                :caption: Creates a snapshot of the file share.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))  # type: ignore
+        try:
+            return self._client.share.create_snapshot(  # type: ignore
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def delete_share(
+            self, delete_snapshots=False,  # type: Optional[bool]
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param bool delete_snapshots:
+            Indicates if snapshots are to be deleted.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.6.0
+                This keyword argument was introduced in API version '2020-02-02'.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START delete_share]
+                :end-before: [END delete_share]
+                :language: python
+                :dedent: 12
+                :caption: Deletes the share and any snapshots.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        delete_include = None
+        if delete_snapshots:
+            delete_include = DeleteSnapshotsOptionType.include
+        try:
+            self._client.share.delete(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                lease_access_conditions=access_conditions,
+                delete_snapshots=delete_include,
+                **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_share_properties(self, **kwargs):
+        # type: (Any) -> ShareProperties
+        """Returns all user-defined metadata and system properties for the
+        specified share. The data returned does not include the share's
+        list of files or directories.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.6.0
+                This keyword argument was introduced in API version '2020-02-02'.
+
+        :returns: The share properties.
+        :rtype: ~azure.storage.fileshare.ShareProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world.py
+                :start-after: [START get_share_properties]
+                :end-before: [END get_share_properties]
+                :language: python
+                :dedent: 12
+                :caption: Gets the share properties.
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + props = self._client.share.get_properties( + timeout=timeout, + sharesnapshot=self.snapshot, + cls=deserialize_share_properties, + lease_access_conditions=access_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + props.name = self.share_name + props.snapshot = self.snapshot + return props # type: ignore + + @distributed_trace + def set_share_quota(self, quota, **kwargs): + # type: (int, Any) -> Dict[str, Any] + """Sets the quota for the share. + + :param int quota: + Specifies the maximum size of the share, in gigabytes. + Must be greater than 0, and less than or equal to 5TB. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-02-02'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START set_share_quota] + :end-before: [END set_share_quota] + :language: python + :dedent: 12 + :caption: Sets the share quota. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + return self._client.share.set_quota( # type: ignore + timeout=timeout, + quota=quota, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def set_share_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the share. + + Each call to this operation replaces all existing metadata + attached to the share. To remove all metadata from the share, + call this operation with no metadata dict. + + :param metadata: + Name-value pairs associated with the share as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-02-02'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START set_share_metadata] + :end-before: [END set_share_metadata] + :language: python + :dedent: 12 + :caption: Sets the share metadata. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return self._client.share.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + lease_access_conditions=access_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def get_share_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the share. The permissions + indicate whether files in a share may be accessed publicly. 
+ + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-02-02'. + + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = self._client.share.get_access_policy( + timeout=timeout, + cls=return_headers_and_deserialized, + lease_access_conditions=access_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return { + 'public_access': response.get('share_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace + def set_share_access_policy(self, signed_identifiers, **kwargs): + # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] + """Sets the permissions for the share, or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether files in a share may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the share. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-02-02'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) + signed_identifiers = identifiers # type: ignore + try: + return self._client.share.set_access_policy( # type: ignore + share_acl=signed_identifiers or None, + timeout=timeout, + cls=return_response_headers, + lease_access_conditions=access_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def get_share_stats(self, **kwargs): + # type: (Any) -> int + """Gets the approximate size of the data stored on the share in bytes. + + Note that this value may not include all recently created + or recently re-sized files. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-02-02'. + + :return: The approximate size of the data (in bytes) stored on the share. 
+ :rtype: int + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + stats = self._client.share.get_statistics( + timeout=timeout, + lease_access_conditions=access_conditions, + **kwargs) + return stats.share_usage_bytes # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files( + self, directory_name=None, # type: Optional[str] + name_starts_with=None, # type: Optional[str] + marker=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Iterable[Dict[str,str]] + """Lists the directories and files under the share. + + :param str directory_name: + Name of a directory. + :param str name_starts_with: + Filters the results to return only directories whose names + begin with the specified prefix. + :param str marker: + An opaque continuation token. This value can be retrieved from the + next_marker field of a previous generator object. If specified, + this generator will begin returning results from this point. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START share_list_files_in_dir] + :end-before: [END share_list_files_in_dir] + :language: python + :dedent: 12 + :caption: List directories and files in the share. + """ + timeout = kwargs.pop('timeout', None) + directory = self.get_directory_client(directory_name) + kwargs.setdefault('merge_span', True) + return directory.list_directories_and_files( + name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) + + @staticmethod + def _create_permission_for_share_options(file_permission, # type: str + **kwargs): + options = { + 'share_permission': SharePermission(permission=file_permission), + 'cls': deserialize_permission_key, + 'timeout': kwargs.pop('timeout', None), + } + options.update(kwargs) + return options + + @distributed_trace + def create_permission_for_share(self, file_permission, # type: str + **kwargs # type: Any + ): + # type: (...) -> str + """Create a permission (a security descriptor) at the share level. + + This 'permission' can be used for the files/directories in the share. + If a 'permission' already exists, it shall return the key of it, else + creates a new permission at the share level and return its key. + + :param str file_permission: + File permission, a Portable SDDL + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A file permission key + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) + try: + return self._client.share.create_permission(**options) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def get_permission_for_share( # type: ignore + self, permission_key, # type: str + **kwargs # type: Any + ): + # type: (...) -> str + """Get a permission (a security descriptor) for a given key. + + This 'permission' can be used for the files/directories in the share. + + :param str permission_key: + Key of the file permission to retrieve + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :returns: A file permission (a portable SDDL) + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + try: + return self._client.share.get_permission( # type: ignore + file_permission_key=permission_key, + cls=deserialize_permission, + timeout=timeout, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def create_directory(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Creates a directory in the share and returns a client to interact + with the directory. + + :param str directory_name: + The name of the directory. + :keyword metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + directory = self.get_directory_client(directory_name) + kwargs.setdefault('merge_span', True) + directory.create_directory(**kwargs) + return directory # type: ignore + + @distributed_trace + def delete_directory(self, directory_name, **kwargs): + # type: (str, Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :param str directory_name: + The name of the directory. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + directory = self.get_directory_client(directory_name) + directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_service_client.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_service_client.py new file mode 100644 index 0000000..d6b240e --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_service_client.py @@ -0,0 +1,415 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Dict, List, + TYPE_CHECKING +) +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse # type: ignore + +from azure.core.paging import ItemPaged +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.response_handlers import process_storage_error +from ._generated import AzureFileStorage +from ._generated.models import StorageErrorException, StorageServiceProperties +from ._generated.version import VERSION +from ._share_client import ShareClient +from ._serialize import get_api_version +from ._models import ( + SharePropertiesPaged, + service_properties_deserialize, +) + +if TYPE_CHECKING: + from datetime import datetime + from ._models import ( + ShareProperties, + Metrics, + CorsRule, + ShareProtocolSettings + ) + + +class ShareServiceClient(StorageAccountHostsMixin): + """A client to interact with the File Share Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete shares within the account. 
+ For operations relating to a specific share, a client for that entity + can also be retrieved using the :func:`get_share_client` function. + + :param str account_url: + The URL to the file share storage account. Any other entities included + in the URL path (e.g. share or file) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START create_share_service_client] + :end-before: [END create_share_service_client] + :language: python + :dedent: 8 + :caption: Create the share service client with url and credential. + """ + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + if hasattr(credential, 'get_token'): + raise ValueError("Token credentials not supported by the File Share service.") + + _, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + self._query_str, credential = self._format_query_string(sas_token, credential) + super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + return "{}://{}/{}".format(self.scheme, hostname, self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> ShareServiceClient + """Create ShareServiceClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :returns: A File Share service client. + :rtype: ~azure.storage.fileshare.ShareServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START create_share_service_client_from_conn_string] + :end-before: [END create_share_service_client_from_conn_string] + :language: python + :dedent: 8 + :caption: Create the share service client with connection string. 
+ """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls(account_url, credential=credential, **kwargs) + + @distributed_trace + def get_service_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the properties of a storage account's File Share service, including + Azure Storage Analytics. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A dictionary containing file service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START get_service_properties] + :end-before: [END get_service_properties] + :language: python + :dedent: 8 + :caption: Get file share service properties. + """ + timeout = kwargs.pop('timeout', None) + try: + service_props = self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def set_service_properties( + self, hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + protocol=None, # type: Optional[ShareProtocolSettings], + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's File Share service, including + Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the + existing settings on the service for that functionality are preserved. + + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for files. + :type hour_metrics: ~azure.storage.fileshare.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for files. + :type minute_metrics: ~azure.storage.fileshare.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) + :param protocol: + Sets protocol settings + :type protocol: ~azure.storage.fileshare.ShareProtocolSettings + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START set_service_properties] + :end-before: [END set_service_properties] + :language: python + :dedent: 8 + :caption: Sets file share service properties. + """ + timeout = kwargs.pop('timeout', None) + props = StorageServiceProperties( + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors, + protocol=protocol + ) + try: + self._client.service.set_properties(storage_service_properties=props, timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_shares( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + include_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> ItemPaged[ShareProperties] + """Returns auto-paging iterable of dict-like ShareProperties under the specified account. 
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all shares have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only shares whose names
+            begin with the specified name_starts_with.
+        :param bool include_metadata:
+            Specifies that share metadata be returned in the response.
+        :param bool include_snapshots:
+            Specifies that share snapshots be returned in the response.
+        :keyword bool include_deleted:
+            Specifies that deleted shares be returned in the response.
+            This is only supported for accounts with share soft delete enabled.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) of ShareProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_list_shares]
+                :end-before: [END fsc_list_shares]
+                :language: python
+                :dedent: 12
+                :caption: List shares in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        include = []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+        if include_metadata:
+            include.append('metadata')
+        if include_snapshots:
+            include.append('snapshots')
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_shares_segment,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=SharePropertiesPaged)
+
+    @distributed_trace
+    def create_share(
+            self, share_name,  # type: str
+            **kwargs
+        ):
+        # type: (...) -> ShareClient
+        """Creates a new share under the specified account. If a share
+        with the same name already exists, the operation fails. Returns a client with
+        which to interact with the newly created share.
+
+        :param str share_name: The name of the share to create.
+        :keyword dict(str,str) metadata:
+            A dict with name-value pairs to associate with the
+            share as metadata. Example: {'Category': 'test'}
+        :keyword int quota:
+            Quota in bytes.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.fileshare.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_create_shares]
+                :end-before: [END fsc_create_shares]
+                :language: python
+                :dedent: 8
+                :caption: Create a share in the file share service.
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs)
+        return share
+
+    @distributed_trace
+    def delete_share(
+            self, share_name,  # type: Union[ShareProperties, str]
+            delete_snapshots=False,  # type: Optional[bool]
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param share_name:
+            The share to delete. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share_name: str or ~azure.storage.fileshare.ShareProperties
+        :param bool delete_snapshots:
+            Indicates if snapshots are to be deleted.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_delete_shares]
+                :end-before: [END fsc_delete_shares]
+                :language: python
+                :dedent: 12
+                :caption: Delete a share in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        share.delete_share(
+            delete_snapshots=delete_snapshots, timeout=timeout, **kwargs)
+
+    @distributed_trace
+    def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs):
+        # type: (str, str, **Any) -> ShareClient
+        """Restores a soft-deleted share.
+
+        The operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.2.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_share_name:
+            Specifies the name of the deleted share to restore.
+        :param str deleted_share_version:
+            Specifies the version of the deleted share to restore.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.fileshare.ShareClient
+        """
+        share = self.get_share_client(deleted_share_name)
+
+        try:
+            share._client.share.restore(deleted_share_name=deleted_share_name,  # pylint: disable = protected-access
+                                        deleted_share_version=deleted_share_version,
+                                        timeout=kwargs.pop('timeout', None), **kwargs)
+            return share
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    def get_share_client(self, share, snapshot=None):
+        # type: (Union[ShareProperties, str], Optional[Union[Dict[str, Any], str]]) -> ShareClient
+        """Get a client to interact with the specified share.
+        The share need not already exist.
+
+        :param share:
+            The share. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share: str or ~azure.storage.fileshare.ShareProperties
+        :param str snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :returns: A ShareClient.
+        :rtype: ~azure.storage.fileshare.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START get_share_client]
+                :end-before: [END get_share_client]
+                :language: python
+                :dedent: 8
+                :caption: Gets the share client.
+        """
+        try:
+            share_name = share.name
+        except AttributeError:
+            share_name = share
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return ShareClient(
+            self.url, share_name=share_name, snapshot=snapshot, credential=self.credential,
+            api_version=self.api_version, _hosts=self._hosts,
+            _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/__init__.py
new file mode 100644
index 0000000..160f882
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/__init__.py
@@ -0,0 +1,56 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import hmac + +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore + +import six + + +def url_quote(url): + return quote(url) + + +def url_unquote(url): + return unquote(url) + + +def encode_base64(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def decode_base64_to_bytes(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def decode_base64_to_text(data): + decoded_bytes = decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def sign_string(key, string_to_sign, key_is_base64=True): + if key_is_base64: + key = decode_base64_to_bytes(key) + else: + if isinstance(key, six.text_type): + key = key.encode('utf-8') + if isinstance(string_to_sign, six.text_type): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = encode_base64(digest) + return encoded_digest diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/authentication.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/authentication.py new file mode 100644 index 0000000..d04c1e4 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/authentication.py @@ -0,0 +1,142 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +import sys + +try: + from urllib.parse import urlparse, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import unquote # type: ignore + +try: + from yarl import URL +except ImportError: + pass + +try: + from azure.core.pipeline.transport import AioHttpTransport +except ImportError: + AioHttpTransport = None + +from azure.core.exceptions import ClientAuthenticationError +from azure.core.pipeline.policies import SansIOHTTPPolicy + +from . import sign_string + + +logger = logging.getLogger(__name__) + + + +# wraps a given exception with the desired exception type +def _wrap_exception(ex, desired_type): + msg = "" + if ex.args: + msg = ex.args[0] + if sys.version_info >= (3,): + # Automatic chaining in Python 3 means we keep the trace + return desired_type(msg) + # There isn't a good solution in 2 for keeping the stack trace + # in general, or that will not result in an error in 3 + # However, we can keep the previous error type and message + # TODO: In the future we will log the trace + return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) + + +class AzureSigningError(ClientAuthenticationError): + """ + Represents a fatal error when attempting to sign a request. + In general, the cause of this exception is user error. For example, the given account key is not valid. + Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
+ """ + + +# pylint: disable=no-self-use +class SharedKeyCredentialPolicy(SansIOHTTPPolicy): + + def __init__(self, account_name, account_key): + self.account_name = account_name + self.account_key = account_key + super(SharedKeyCredentialPolicy, self).__init__() + + @staticmethod + def _get_headers(request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + @staticmethod + def _get_verb(request): + return request.http_request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = urlparse(request.http_request.url).path + try: + if isinstance(request.context.transport, AioHttpTransport) or \ + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ + isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), + AioHttpTransport): + uri_path = URL(uri_path) + return '/' + self.account_name + str(uri_path) + except TypeError: + pass + return '/' + self.account_name + uri_path + + @staticmethod + def _get_canonicalized_headers(request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.http_request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers.sort() + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + unquote(value) + + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + try: + signature = sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.http_request.headers['Authorization'] = auth_string + except Exception as ex: + # Wrap any error that occurred as signing error + # Doing so will clarify/locate the source of problem + raise _wrap_exception(ex, AzureSigningError) + + def on_request(self, request): + string_to_sign = \ + self._get_verb(request) + \ + self._get_headers( + request, + [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client.py new file mode 100644 index 0000000..14deea6 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client.py @@ -0,0 +1,437 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, + Optional, + Any, + Iterable, + Dict, + List, + Type, + Tuple, + TYPE_CHECKING, +) +import logging + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +import six + +from azure.core.configuration import Configuration +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + RedirectPolicy, + ContentDecodePolicy, + BearerTokenCredentialPolicy, + ProxyPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + UserAgentPolicy +) + +from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .policies import ( + StorageHeadersPolicy, + StorageContentValidation, + StorageRequestHook, + StorageResponseHook, + StorageLoggingPolicy, + StorageHosts, + QueueMessagePolicy, + ExponentialRetry, +) +from .._version import VERSION +from .._generated.models import StorageErrorException +from .response_handlers import process_storage_error, PartialBatchErrorException + + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, + "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, + "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, + "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, +} + + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None
+        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+        self._hosts = kwargs.get("_hosts")
+        self.scheme = parsed_url.scheme
+
+        if service not in ["blob", "queue", "file-share", "dfs"]:
+            raise ValueError("Invalid service: {}".format(service))
+        service_name = service.split('-')[0]
+        account = parsed_url.netloc.split(".{}.core.".format(service_name))
+
+        self.account_name = account[0] if len(account) > 1 else None
+        if not self.account_name and (parsed_url.netloc.startswith("localhost")
+                                      or parsed_url.netloc.startswith("127.0.0.1")):
+            self.account_name = parsed_url.path.strip("/")
+
+        self.credential = _format_shared_key_credential(self.account_name, credential)
+        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+            raise ValueError("Token credential is only supported with HTTPS.")
+
+        secondary_hostname = None
+        if hasattr(self.credential, "account_name"):
+            self.account_name = self.credential.account_name
+            secondary_hostname = "{}-secondary.{}.{}".format(
+                self.credential.account_name, service_name, SERVICE_HOST_BASE)
+
+        if not self._hosts:
+            if len(account) > 1:
+                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+            if kwargs.get("secondary_hostname"):
+                secondary_hostname = kwargs["secondary_hostname"]
+            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+        self.require_encryption = kwargs.get("require_encryption", False)
+        self.key_encryption_key = kwargs.get("key_encryption_key")
+        self.key_resolver_function = kwargs.get("key_resolver_function")
+        self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
+
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        self._client.__exit__(*args)
+
+    def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        self._client.close()
+
+    @property
+    def url(self):
+        """The full endpoint URL to this entity, including SAS token if used.
+
+        This could be either the primary endpoint,
+        or the secondary endpoint depending on the current :func:`location_mode`.
+        """
+        return self._format_url(self._hosts[self._location_mode])
+
+    @property
+    def primary_endpoint(self):
+        """The full primary endpoint URL.
+
+        :type: str
+        """
+        return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+    @property
+    def primary_hostname(self):
+        """The hostname of the primary endpoint.
+
+        :type: str
+        """
+        return self._hosts[LocationMode.PRIMARY]
+
+    @property
+    def secondary_endpoint(self):
+        """The full secondary endpoint URL if configured.
+
+        If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :type: str
+        :raise ValueError:
+        """
+        if not self._hosts[LocationMode.SECONDARY]:
+            raise ValueError("No secondary host configured.")
+        return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+    @property
+    def secondary_hostname(self):
+        """The hostname of the secondary endpoint.
+
+        If not available this will be None. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
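+
+        As a worked example (account name is illustrative): for a primary
+        endpoint of ``https://myaccount.file.core.windows.net``, the derived
+        secondary hostname would be::
+
+            myaccount-secondary.file.core.windows.net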
+
+        :type: str or None
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :type: str
+        """
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError("No host URL for location mode: {}".format(value))
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :type: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
+        query_str = "?"
+        if snapshot:
+            query_str += "snapshot={}&".format(self.snapshot)
+        if share_snapshot:
+            query_str += "sharesnapshot={}&".format(self.snapshot)
+        if sas_token and not credential:
+            query_str += sas_token
+        elif is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")
+            credential = None
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, "get_token"):
+            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif credential is not None:
+            raise TypeError("Unsupported credential: {}".format(credential))
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        config.transport = kwargs.get("transport")  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            config.transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.headers_policy,
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, Pipeline(config.transport, policies=policies)
+
+    def _batch_send(
+            self, *reqs,  # type: HttpRequest
+            **kwargs
+        ):
+        """Given a series of requests, do a Storage batch call.
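+
+        Mechanics, as implemented below (a summary, not an external contract):
+        the requests are combined into a single multipart/mixed POST against
+        the ``?comp=batch`` endpoint, and the part responses are returned as
+        an iterator. With ``raise_on_any_failure`` left at its default of
+        True, a ``PartialBatchErrorException`` is raised if any part comes
+        back with a non-2xx status.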
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + request = self._client._client.post( # pylint: disable=protected-access + url='https://{}/?comp=batch'.format(self.primary_hostname), + headers={ + 'x-ms-version': self.api_version + } + ) + + request.set_multipart_mixed( + *reqs, + policies=[ + StorageHeadersPolicy(), + self._credential_policy + ], + enforce_https=False + ) + + pipeline_response = self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() + if raise_on_any_failure: + parts = list(response.parts()) + if any(p for p in parts if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts + ) + raise error + return iter(parts) + return parts + except StorageErrorException as error: + process_storage_error(error) + +class TransportWrapper(HttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. + """ + def __init__(self, transport): + self._transport = transport + + def send(self, request, **kwargs): + return self._transport.send(request, **kwargs) + + def open(self): + pass + + def close(self): + pass + + def __enter__(self): + pass + + def __exit__(self, *args): # pylint: disable=arguments-differ + pass + + +def _format_shared_key_credential(account_name, credential): + if isinstance(credential, six.string_types): + if not account_name: + raise ValueError("Unable to determine account name for shared key credential.") + credential = {"account_name": account_name, "account_key": credential} + if isinstance(credential, dict): + if "account_name" not in credential: + raise ValueError("Shared key credential missing 'account_name") + if "account_key" not in credential: + raise ValueError("Shared key credential missing 'account_key") + return SharedKeyCredentialPolicy(**credential) + return credential + + +def parse_connection_str(conn_str, credential, service): + conn_str = conn_str.rstrip(";") + conn_settings = [s.split("=", 1) for s in conn_str.split(";")] + if any(len(tup) != 2 for tup in conn_settings): + raise ValueError("Connection string is either blank or malformed.") + conn_settings = dict(conn_settings) + endpoints = _SERVICE_PARAMS[service] + primary = None + secondary = None + if not credential: + try: + credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} + except KeyError: + credential = conn_settings.get("SharedAccessSignature") + if endpoints["primary"] in conn_settings: + primary = conn_settings[endpoints["primary"]] + if endpoints["secondary"] in conn_settings: + secondary = conn_settings[endpoints["secondary"]] + else: + if endpoints["secondary"] in conn_settings: + raise ValueError("Connection string specifies only secondary endpoint.") + try: + primary = "{}://{}.{}.{}".format( + conn_settings["DefaultEndpointsProtocol"], + conn_settings["AccountName"], + service, + conn_settings["EndpointSuffix"], + ) + secondary = "{}-secondary.{}.{}".format( + conn_settings["AccountName"], service, conn_settings["EndpointSuffix"] + ) + except KeyError: + pass + + if not primary: + try: + primary = "https://{}.{}.{}".format( + 
conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) + ) + except KeyError: + raise ValueError("Connection string missing required connection details.") + return primary, secondary, credential + + +def create_configuration(**kwargs): + # type: (**Any) -> Configuration + config = Configuration(**kwargs) + config.headers_policy = StorageHeadersPolicy(**kwargs) + config.user_agent_policy = UserAgentPolicy( + sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) + config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.logging_policy = StorageLoggingPolicy(**kwargs) + config.proxy_policy = ProxyPolicy(**kwargs) + + # Storage settings + config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) + config.copy_polling_interval = 15 + + # Block blob uploads + config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) + config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) + config.use_byte_buffer = kwargs.get("use_byte_buffer", False) + + # Page blob uploads + config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, six.string_types): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client_async.py new file mode 100644 index 0000000..d252ad0 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client_async.py @@ -0,0 +1,179 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging +from azure.core.pipeline import AsyncPipeline +from azure.core.async_paging import AsyncList +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline.policies import ( + ContentDecodePolicy, + AsyncBearerTokenCredentialPolicy, + AsyncRedirectPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, +) +from azure.core.pipeline.transport import AsyncHttpTransport + +from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .authentication import SharedKeyCredentialPolicy +from .base_client import create_configuration +from .policies import ( + StorageContentValidation, + StorageRequestHook, + StorageHosts, + StorageHeadersPolicy, + QueueMessagePolicy +) +from .policies_async import AsyncStorageResponseHook + +from .._generated.models import StorageErrorException +from .response_handlers import process_storage_error, PartialBatchErrorException + +if TYPE_CHECKING: + from azure.core.pipeline import Pipeline + from azure.core.pipeline.transport import HttpRequest + from azure.core.configuration import Configuration +_LOGGER = logging.getLogger(__name__) + + +class AsyncStorageAccountHostsMixin(object): + + def __enter__(self): + raise TypeError("Async client only supports 'async with'.") + + def __exit__(self, *args): + pass + + async def __aenter__(self): + await self._client.__aenter__() + return self + + async def __aexit__(self, *args): + await self._client.__aexit__(*args) + + async def close(self): + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + await self._client.close() + + def _create_pipeline(self, credential, **kwargs): + # type: (Any, **Any) -> Tuple[Configuration, Pipeline] + self._credential_policy = None + if hasattr(credential, 'get_token'): + self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) + elif isinstance(credential, SharedKeyCredentialPolicy): + self._credential_policy = credential + elif credential is not None: + raise TypeError("Unsupported credential: {}".format(credential)) + config = kwargs.get('_configuration') or create_configuration(**kwargs) + if kwargs.get('_pipeline'): + return config, kwargs['_pipeline'] + config.transport = kwargs.get('transport') # type: ignore + kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) + kwargs.setdefault("read_timeout", READ_TIMEOUT) + if not config.transport: + try: + from azure.core.pipeline.transport import AioHttpTransport + except ImportError: + raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.")
+            config.transport = AioHttpTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.headers_policy,
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            ContentDecodePolicy(response_encoding="utf-8"),
+            AsyncRedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),  # type: ignore
+            config.retry_policy,
+            config.logging_policy,
+            AsyncStorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs),
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, AsyncPipeline(config.transport, policies=policies)
+
+    async def _batch_send(
+            self, *reqs: 'HttpRequest',
+            **kwargs
+        ):
+        """Given a series of requests, do a Storage batch call.
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url='https://{}/?comp=batch'.format(self.primary_hostname),
+            headers={
+                'x-ms-version': self.api_version
+            }
+        )
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=[
+                StorageHeadersPolicy(),
+                self._credential_policy
+            ],
+            enforce_https=False
+        )
+
+        pipeline_response = await self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()  # Return an AsyncIterator
+            if raise_on_any_failure:
+                parts_list = []
+                async for part in parts:
+                    parts_list.append(part)
+                if any(p for p in parts_list if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts_list
+                    )
+                    raise error
+                return AsyncList(parts_list)
+            return parts
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, async_transport):
+        self._transport = async_transport
+
+    async def send(self, request, **kwargs):
+        return await self._transport.send(request, **kwargs)
+
+    async def open(self):
+        pass
+
+    async def close(self):
+        pass
+
+    async def __aenter__(self):
+        pass
+
+    async def __aexit__(self, *args):  # pylint: disable=arguments-differ
+        pass
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/constants.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/constants.py
new file mode 100644
index 0000000..7fb05b5
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/constants.py
@@ -0,0 +1,26 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- + +import sys +from .._generated.version import VERSION + + +X_MS_VERSION = VERSION + +# Socket timeout in seconds +CONNECTION_TIMEOUT = 20 +READ_TIMEOUT = 20 + +# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) +# The socket timeout is now the maximum total duration to send all data. +if sys.version_info >= (3, 5): + # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds + # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) + READ_TIMEOUT = 2000 + +STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" + +SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/encryption.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/encryption.py new file mode 100644 index 0000000..62607cc --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/encryption.py @@ -0,0 +1,542 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os +from os import urandom +from json import ( + dumps, + loads, +) +from collections import OrderedDict + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import CBC +from cryptography.hazmat.primitives.padding import PKCS7 + +from azure.core.exceptions import HttpResponseError + +from .._version import VERSION +from . import encode_base64, decode_base64_to_bytes + + +_ENCRYPTION_PROTOCOL_V1 = '1.0' +_ERROR_OBJECT_INVALID = \ + '{0} does not define a complete interface. Value of {1} is either missing or invalid.' + + +def _validate_not_none(param_name, param): + if param is None: + raise ValueError('{0} should not be None.'.format(param_name)) + + +def _validate_key_encryption_key_wrap(kek): + # Note that None is not callable and so will fail the second clause of each check. + if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) + + +class _EncryptionAlgorithm(object): + ''' + Specifies which client encryption algorithm is used. + ''' + AES_CBC_256 = 'AES_CBC_256' + + +class _WrappedContentKey: + ''' + Represents the envelope key details stored on the service. + ''' + + def __init__(self, algorithm, encrypted_key, key_id): + ''' + :param str algorithm: + The algorithm used for wrapping. + :param bytes encrypted_key: + The encrypted content-encryption-key. + :param str key_id: + The key-encryption-key identifier string. 
+ ''' + + _validate_not_none('algorithm', algorithm) + _validate_not_none('encrypted_key', encrypted_key) + _validate_not_none('key_id', key_id) + + self.algorithm = algorithm + self.encrypted_key = encrypted_key + self.key_id = key_id + + +class _EncryptionAgent: + ''' + Represents the encryption agent stored on the service. + It consists of the encryption protocol version and encryption algorithm used. + ''' + + def __init__(self, encryption_algorithm, protocol): + ''' + :param _EncryptionAlgorithm encryption_algorithm: + The algorithm used for encrypting the message contents. + :param str protocol: + The protocol version used for encryption. + ''' + + _validate_not_none('encryption_algorithm', encryption_algorithm) + _validate_not_none('protocol', protocol) + + self.encryption_algorithm = str(encryption_algorithm) + self.protocol = protocol + + +class _EncryptionData: + ''' + Represents the encryption data that is stored on the service. + ''' + + def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, + key_wrapping_metadata): + ''' + :param bytes content_encryption_IV: + The content encryption initialization vector. + :param _EncryptionAgent encryption_agent: + The encryption agent. + :param _WrappedContentKey wrapped_content_key: + An object that stores the wrapping algorithm, the key identifier, + and the encrypted key bytes. + :param dict key_wrapping_metadata: + A dict containing metadata related to the key wrapping. + ''' + + _validate_not_none('content_encryption_IV', content_encryption_IV) + _validate_not_none('encryption_agent', encryption_agent) + _validate_not_none('wrapped_content_key', wrapped_content_key) + + self.content_encryption_IV = content_encryption_IV + self.encryption_agent = encryption_agent + self.wrapped_content_key = wrapped_content_key + self.key_wrapping_metadata = key_wrapping_metadata + + +def _generate_encryption_data_dict(kek, cek, iv): + ''' + Generates and returns the encryption metadata as a dict. + + :param object kek: The key encryption key. See calling functions for more information. + :param bytes cek: The content encryption key. + :param bytes iv: The initialization vector. + :return: A dict containing all the encryption metadata. + :rtype: dict + ''' + # Encrypt the cek. + wrapped_cek = kek.wrap_key(cek) + + # Build the encryption_data dict. + # Use OrderedDict to comply with Java's ordering requirement. + wrapped_content_key = OrderedDict() + wrapped_content_key['KeyId'] = kek.get_kid() + wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) + wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() + + encryption_agent = OrderedDict() + encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 + encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 + + encryption_data_dict = OrderedDict() + encryption_data_dict['WrappedContentKey'] = wrapped_content_key + encryption_data_dict['EncryptionAgent'] = encryption_agent + encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) + encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} + + return encryption_data_dict + + +def _dict_to_encryption_data(encryption_data_dict): + ''' + Converts the specified dictionary to an EncryptionData object for + eventual use in decryption. + + :param dict encryption_data_dict: + The dictionary containing the encryption data. + :return: an _EncryptionData object built from the dictionary. 
+ :rtype: _EncryptionData + ''' + try: + if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: + raise ValueError("Unsupported encryption version.") + except KeyError: + raise ValueError("Unsupported encryption version.") + wrapped_content_key = encryption_data_dict['WrappedContentKey'] + wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], + decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), + wrapped_content_key['KeyId']) + + encryption_agent = encryption_data_dict['EncryptionAgent'] + encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], + encryption_agent['Protocol']) + + if 'KeyWrappingMetadata' in encryption_data_dict: + key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] + else: + key_wrapping_metadata = None + + encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), + encryption_agent, + wrapped_content_key, + key_wrapping_metadata) + + return encryption_data + + +def _generate_AES_CBC_cipher(cek, iv): + ''' + Generates and returns an encryption cipher for AES CBC using the given cek and iv. + + :param bytes[] cek: The content encryption key for the cipher. + :param bytes[] iv: The initialization vector for the cipher. + :return: A cipher for encrypting in AES256 CBC. + :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher + ''' + + backend = default_backend() + algorithm = AES(cek) + mode = CBC(iv) + return Cipher(algorithm, mode, backend) + + +def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): + ''' + Extracts and returns the content_encryption_key stored in the encryption_data object + and performs necessary validation on all parameters. + :param _EncryptionData encryption_data: + The encryption metadata of the retrieved value. + :param obj key_encryption_key: + The key_encryption_key used to unwrap the cek. Please refer to high-level service object + instance variables for more details. + :param func key_resolver: + A function used that, given a key_id, will return a key_encryption_key. Please refer + to high-level service object instance variables for more details. + :return: the content_encryption_key stored in the encryption_data object. + :rtype: bytes[] + ''' + + _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) + _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) + + if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: + raise ValueError('Encryption version is not supported.') + + content_encryption_key = None + + # If the resolver exists, give priority to the key it finds. + if key_resolver is not None: + key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) + + _validate_not_none('key_encryption_key', key_encryption_key) + if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) + if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): + raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') + # Will throw an exception if the specified algorithm is not supported. 
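+    # unwrap_key(encrypted_key, algorithm) is expected to reverse the kek's
+    # wrap_key operation and return the raw content-encryption-key bytes.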
+    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+                                                           encryption_data.wrapped_content_key.algorithm)
+    _validate_not_none('content_encryption_key', content_encryption_key)
+
+    return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+    '''
+    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+    :param str message:
+        The ciphertext to be decrypted.
+    :param _EncryptionData encryption_data:
+        The metadata associated with this ciphertext.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted plaintext.
+    :rtype: str
+    '''
+    _validate_not_none('message', message)
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+    # decrypt data
+    decrypted_data = message
+    decryptor = cipher.decryptor()
+    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+    # unpad data
+    unpadder = PKCS7(128).unpadder()
+    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+    return decrypted_data
+
+
+def encrypt_blob(blob, key_encryption_key):
+    '''
+    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encryption metadata. This method should
+    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+    is done as a part of the upload_data_chunks method.
+
+    :param bytes blob:
+        The blob to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+    :rtype: (str, bytes)
+    '''
+
+    _validate_not_none('blob', blob)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = urandom(32)
+    initialization_vector = urandom(16)
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(blob) + padder.finalize()
+
+    # Encrypt the data.
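+    # (update() consumes the whole padded buffer and finalize() flushes the
+    # final AES-CBC block; concatenated, they form the complete ciphertext.)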
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+                                                     initialization_vector)
+    encryption_data['EncryptionMode'] = 'FullBlob'
+
+    return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(key_encryption_key):
+    '''
+    Generates the encryption_metadata for the blob.
+
+    :param bytes key_encryption_key:
+        The key-encryption-key used to wrap the cek associated with this blob.
+    :return: A tuple containing the cek and iv for this blob as well as the
+        serialized encryption metadata for the blob.
+    :rtype: (bytes, bytes, str)
+    '''
+    encryption_data = None
+    content_encryption_key = None
+    initialization_vector = None
+    if key_encryption_key:
+        _validate_key_encryption_key_wrap(key_encryption_key)
+        content_encryption_key = urandom(32)
+        initialization_vector = urandom(16)
+        encryption_data = _generate_encryption_data_dict(key_encryption_key,
+                                                         content_encryption_key,
+                                                         initialization_vector)
+        encryption_data['EncryptionMode'] = 'FullBlob'
+        encryption_data = dumps(encryption_data)
+
+    return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+                 content, start_offset, end_offset, response_headers):
+    '''
+    Decrypts the given blob contents and returns only the requested range.
+
+    :param bool require_encryption:
+        Whether or not the calling blob service requires objects to be decrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :param key_resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted blob content.
+    :rtype: bytes
+    '''
+    try:
+        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+    except:  # pylint: disable=bare-except
+        if require_encryption:
+            raise ValueError(
+                'Encryption required, but received data does not contain appropriate metadata. ' + \
+                'Data was either not encrypted or metadata has been lost.')
+
+        return content
+
+    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    blob_type = response_headers['x-ms-blob-type']
+
+    iv = None
+    unpad = False
+    if 'content-range' in response_headers:
+        content_range = response_headers['content-range']
+        # Format: 'bytes x-y/size'
+
+        # Ignore the word 'bytes'
+        content_range = content_range.split(' ')
+
+        content_range = content_range[1].split('-')
+        content_range = content_range[1].split('/')
+        end_range = int(content_range[0])
+        blob_size = int(content_range[1])
+
+        if start_offset >= 16:
+            iv = content[:16]
+            content = content[16:]
+            start_offset -= 16
+        else:
+            iv = encryption_data.content_encryption_IV
+
+        if end_range == blob_size - 1:
+            unpad = True
+    else:
+        unpad = True
+        iv = encryption_data.content_encryption_IV
+
+    if blob_type == 'PageBlob':
+        unpad = False
+
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+    decryptor = cipher.decryptor()
+
+    content = decryptor.update(content) + decryptor.finalize()
+    if unpad:
+        unpadder = PKCS7(128).unpadder()
+        content = unpadder.update(content) + unpadder.finalize()
+
+    return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+    encryptor = None
+    padder = None
+
+    if cek is not None and iv is not None:
+        cipher = _generate_AES_CBC_cipher(cek, iv)
+        encryptor = cipher.encryptor()
+        padder = PKCS7(128).padder() if should_pad else None
+
+    return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key):
+    '''
+    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+    :param object message:
+        The plain text message to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A json-formatted string containing the encrypted message and the encryption metadata.
+    :rtype: str
+    '''
+
+    _validate_not_none('message', message)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = os.urandom(32)
+    initialization_vector = os.urandom(16)
+
+    # Queue encoding functions all return unicode strings, and encryption should
+    # operate on binary strings.
+    message = message.encode('utf-8')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(message) + padder.finalize()
+
+    # Encrypt the data.
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+    # Build the dictionary structure.
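+    # The JSON envelope pairs the base64-encoded ciphertext with the metadata
+    # (wrapped cek, algorithm, protocol version and IV) needed to decrypt it.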
+    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+                                                                      content_encryption_key,
+                                                                      initialization_vector)}
+
+    return dumps(queue_message)
+
+
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+    '''
+    Returns the decrypted message contents from an EncryptedQueueMessage.
+    If no encryption metadata is present, will return the unaltered message.
+    :param str message:
+        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+    :param bool require_encryption:
+        If set, will enforce that the retrieved messages are encrypted and decrypt them.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The plain text message from the queue message.
+    :rtype: str
+    '''
+
+    try:
+        message = loads(message)
+
+        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+    except (KeyError, ValueError):
+        # Message was not json formatted and so was not encrypted
+        # or the user provided a json formatted message.
+        if require_encryption:
+            raise ValueError('Message was not encrypted.')
+
+        return message
+    try:
+        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+    except Exception as error:
+        raise HttpResponseError(
+            message="Decryption failed.",
+            response=response,
+            error=error)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/models.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/models.py
new file mode 100644
index 0000000..27cd236
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/models.py
@@ -0,0 +1,468 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-instance-attributes + +from enum import Enum + + +def get_enum_value(value): + if value is None or value in ["None", ""]: + return None + try: + return value.value + except AttributeError: + return value + + +class StorageErrorCode(str, Enum): + + # Generic storage values + account_already_exists = "AccountAlreadyExists" + account_being_created = "AccountBeingCreated" + account_is_disabled = "AccountIsDisabled" + authentication_failed = "AuthenticationFailed" + authorization_failure = "AuthorizationFailure" + no_authentication_information = "NoAuthenticationInformation" + condition_headers_not_supported = "ConditionHeadersNotSupported" + condition_not_met = "ConditionNotMet" + empty_metadata_key = "EmptyMetadataKey" + insufficient_account_permissions = "InsufficientAccountPermissions" + internal_error = "InternalError" + invalid_authentication_info = "InvalidAuthenticationInfo" + invalid_header_value = "InvalidHeaderValue" + invalid_http_verb = "InvalidHttpVerb" + invalid_input = "InvalidInput" + invalid_md5 = "InvalidMd5" + invalid_metadata = "InvalidMetadata" + invalid_query_parameter_value = "InvalidQueryParameterValue" + invalid_range = "InvalidRange" + invalid_resource_name = "InvalidResourceName" + invalid_uri = "InvalidUri" + invalid_xml_document = "InvalidXmlDocument" + invalid_xml_node_value = "InvalidXmlNodeValue" + md5_mismatch = "Md5Mismatch" + metadata_too_large = "MetadataTooLarge" + missing_content_length_header = "MissingContentLengthHeader" + missing_required_query_parameter = "MissingRequiredQueryParameter" + missing_required_header = "MissingRequiredHeader" + missing_required_xml_node = "MissingRequiredXmlNode" + multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" + operation_timed_out = "OperationTimedOut" + out_of_range_input = "OutOfRangeInput" + out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" + request_body_too_large = "RequestBodyTooLarge" + resource_type_mismatch = "ResourceTypeMismatch" + request_url_failed_to_parse = "RequestUrlFailedToParse" + resource_already_exists = "ResourceAlreadyExists" + resource_not_found = "ResourceNotFound" + server_busy = "ServerBusy" + unsupported_header = "UnsupportedHeader" + unsupported_xml_node = "UnsupportedXmlNode" + unsupported_query_parameter = "UnsupportedQueryParameter" + unsupported_http_verb = "UnsupportedHttpVerb" + + # Blob values + append_position_condition_not_met = "AppendPositionConditionNotMet" + blob_already_exists = "BlobAlreadyExists" + blob_not_found = "BlobNotFound" + blob_overwritten = "BlobOverwritten" + blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" + block_count_exceeds_limit = "BlockCountExceedsLimit" + block_list_too_long = "BlockListTooLong" + cannot_change_to_lower_tier = "CannotChangeToLowerTier" + cannot_verify_copy_source = "CannotVerifyCopySource" + container_already_exists = "ContainerAlreadyExists" + container_being_deleted = "ContainerBeingDeleted" + container_disabled = "ContainerDisabled" + container_not_found = "ContainerNotFound" + content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" + copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" + copy_id_mismatch = "CopyIdMismatch" + feature_version_mismatch = "FeatureVersionMismatch" + incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" + incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" + incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" + infinite_lease_duration_required = "InfiniteLeaseDurationRequired" + invalid_blob_or_block = "InvalidBlobOrBlock" + invalid_blob_tier = "InvalidBlobTier" + invalid_blob_type = "InvalidBlobType" + invalid_block_id = "InvalidBlockId" + invalid_block_list = "InvalidBlockList" + invalid_operation = "InvalidOperation" + invalid_page_range = "InvalidPageRange" + invalid_source_blob_type = "InvalidSourceBlobType" + invalid_source_blob_url = "InvalidSourceBlobUrl" + invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" + lease_already_present = "LeaseAlreadyPresent" + lease_already_broken = "LeaseAlreadyBroken" + lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" + lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" + lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" + lease_id_missing = "LeaseIdMissing" + lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" + lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" + lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" + lease_lost = "LeaseLost" + lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" + lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" + lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" + max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" + no_pending_copy_operation = "NoPendingCopyOperation" + operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" + pending_copy_operation = "PendingCopyOperation" + previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" + previous_snapshot_not_found = "PreviousSnapshotNotFound" + previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" + sequence_number_condition_not_met = "SequenceNumberConditionNotMet" + sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" + snapshot_count_exceeded = "SnapshotCountExceeded" + snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" + snapshots_present = "SnapshotsPresent" + source_condition_not_met = "SourceConditionNotMet" + system_in_use = "SystemInUse" + target_condition_not_met = "TargetConditionNotMet" + unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" + blob_being_rehydrated = "BlobBeingRehydrated" + blob_archived = "BlobArchived" + blob_not_archived = "BlobNotArchived" + + # Queue values + invalid_marker = "InvalidMarker" + message_not_found = "MessageNotFound" + message_too_large = "MessageTooLarge" + pop_receipt_mismatch = "PopReceiptMismatch" + queue_already_exists = "QueueAlreadyExists" + queue_being_deleted = "QueueBeingDeleted" + queue_disabled = "QueueDisabled" + queue_not_empty = "QueueNotEmpty" + queue_not_found = "QueueNotFound" + + # File values + cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" + client_cache_flush_delay = "ClientCacheFlushDelay" + delete_pending = "DeletePending" + directory_not_empty = "DirectoryNotEmpty" + file_lock_conflict = "FileLockConflict" + invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" + parent_not_found = "ParentNotFound" + read_only_attribute = "ReadOnlyAttribute" + share_already_exists = "ShareAlreadyExists" + share_being_deleted = "ShareBeingDeleted" + 
share_disabled = "ShareDisabled" + share_not_found = "ShareNotFound" + sharing_violation = "SharingViolation" + share_snapshot_in_progress = "ShareSnapshotInProgress" + share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" + share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" + share_has_snapshots = "ShareHasSnapshots" + container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + content_length_must_be_zero = 'ContentLengthMustBeZero' + path_already_exists = 'PathAlreadyExists' + invalid_flush_position = 'InvalidFlushPosition' + invalid_property_name = 'InvalidPropertyName' + invalid_source_uri = 'InvalidSourceUri' + unsupported_rest_version = 'UnsupportedRestVersion' + file_system_not_found = 'FilesystemNotFound' + path_not_found = 'PathNotFound' + rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' + source_path_not_found = 'SourcePathNotFound' + destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' + file_system_already_exists = 'FilesystemAlreadyExists' + file_system_being_deleted = 'FilesystemBeingDeleted' + invalid_destination_path = 'InvalidDestinationPath' + invalid_rename_source_path = 'InvalidRenameSourcePath' + invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' + lease_is_already_broken = 'LeaseIsAlreadyBroken' + lease_name_mismatch = 'LeaseNameMismatch' + path_conflict = 'PathConflict' + source_path_is_being_deleted = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self): + return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) + + def has_key(self, k): + return k in self.__dict__ + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def keys(self): + return [k for k in self.__dict__ if not k.startswith('_')] + + def values(self): + return [v for k, v in self.__dict__.items() if not k.startswith('_')] + + def items(self): + return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] + + def get(self, key, default=None): + if key in self.__dict__: + return self.__dict__[key] + return default + + +class LocationMode(object): + """ + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + """ + + PRIMARY = 'primary' #: Requests should be sent to the primary location. + SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. + + +class ResourceTypes(object): + """ + Specifies the resource types that are accessible with the account SAS. 
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                     ('c' if self.container else '') +
+                     ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and container,
+        you would provide a string "sc".
+
+        :param str string: Specify service, container, or object in
+            the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.fileshare.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+class AccountSasPermissions(object):
+    """
+    :class:`~AccountSasPermissions` class to be used with the generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    perms found here.
+
+    :param bool read:
+        Valid for all signed resource types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resource types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable set or get tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable get blobs by tags, this should be used together with list permission.
+ """ + def __init__(self, read=False, write=False, delete=False, + list=False, # pylint: disable=redefined-builtin + add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): + self.read = read + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.list = list + self.add = add + self.create = create + self.update = update + self.process = process + self.tag = kwargs.pop('tag', False) + self.filter_by_tags = kwargs.pop('filter_by_tags', False) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('l' if self.list else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('u' if self.update else '') + + ('p' if self.process else '') + + ('f' if self.filter_by_tags else '') + + ('t' if self.tag else '') + ) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create AccountSasPermissions from a string. + + To specify read, write, delete, etc. permissions you need only to + include the first letter of the word in the string. E.g. for read and write + permissions you would provide a string "rw". + + :param str permission: Specify permissions in + the string with the first letter of the word. + :return: An AccountSasPermissions object + :rtype: ~azure.storage.fileshare.AccountSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_list = 'l' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_update = 'u' in permission + p_process = 'p' in permission + p_tag = 't' in permission + p_filter_by_tags = 'f' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, + list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, + filter_by_tags=p_filter_by_tags) + + return parsed + + +class Services(object): + """Specifies the services accessible with the account SAS. + + :param bool blob: + Access for the `~azure.storage.blob.BlobServiceClient` + :param bool queue: + Access for the `~azure.storage.queue.QueueServiceClient` + :param bool fileshare: + Access for the `~azure.storage.fileshare.ShareServiceClient` + """ + + def __init__(self, blob=False, queue=False, fileshare=False): + self.blob = blob + self.queue = queue + self.fileshare = fileshare + self._str = (('b' if self.blob else '') + + ('q' if self.queue else '') + + ('f' if self.fileshare else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, string): + """Create Services from a string. + + To specify blob, queue, or file you need only to + include the first letter of the word in the string. E.g. for blob and queue + you would provide a string "bq". + + :param str string: Specify blob, queue, or file in + in the string with the first letter of the word. + :return: A Services object + :rtype: ~azure.storage.fileshare.Services + """ + res_blob = 'b' in string + res_queue = 'q' in string + res_file = 'f' in string + + parsed = cls(res_blob, res_queue, res_file) + parsed._str = string # pylint: disable = protected-access + return parsed + + +class UserDelegationKey(object): + """ + Represents a user delegation key, provided to the user by Azure Storage + based on their Azure Active Directory access token. 
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
+    """
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/parser.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/parser.py
new file mode 100644
index 0000000..c6feba8
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+    def _str(value):
+        if isinstance(value, unicode):  # pylint: disable=undefined-variable
+            return value.encode('utf-8')
+
+        return str(value)
+else:
+    _str = str
+
+
+def _to_utc_datetime(value):
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies.py
new file mode 100644
index 0000000..c9bc798
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies.py
@@ -0,0 +1,610 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+    from urllib.parse import (
+        urlparse,
+        parse_qsl,
+        urlunparse,
+        urlencode,
+    )
+except ImportError:
+    from urllib import urlencode  # type: ignore
+    from urlparse import (  # type: ignore
+        urlparse,
+        parse_qsl,
+        urlunparse,
+    )
+
+from azure.core.pipeline.policies import (
+    HeadersPolicy,
+    SansIOHTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    HTTPPolicy,
+    RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+    _unicode_type = unicode  # type: ignore
+except NameError:
+    _unicode_type = str
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, _unicode_type):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+    """Are we out of retries?"""
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+    """Is this method/status code retryable? (Based on whitelists and control
+    variables such as the number of total retries to allow, whether to
+    respect the Retry-After header, whether this header is present, and
+    whether the returned status code is on the list of status codes to
+    be retried upon on the presence of the aforementioned header)
+    """
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes above 500 with the exception of 501 Not Implemented and
+        # 505 Version Not Supported indicate a server issue and should be retried.
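+        # Editorial summary (added comment): the full decision table is 404
+        # retries only when the secondary endpoint was read, 408 always retries,
+        # any other 3xx/4xx never retries, and 5xx retries except for the
+        # 501/505 check just below.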
+ if status in [501, 505]: + return False + return True + return False + + +def urljoin(base_url, stub_url): + parsed = urlparse(base_url) + parsed = parsed._replace(path=parsed.path + '/' + stub_url) + return parsed.geturl() + + +class QueueMessagePolicy(SansIOHTTPPolicy): + + def on_request(self, request): + message_id = request.context.options.pop('queue_message_id', None) + if message_id: + request.http_request.url = urljoin( + request.http_request.url, + message_id) + + +class StorageHeadersPolicy(HeadersPolicy): + request_id_header_name = 'x-ms-client-request-id' + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + super(StorageHeadersPolicy, self).on_request(request) + current_time = format_date_time(time()) + request.http_request.headers['x-ms-date'] = current_time + + custom_id = request.context.options.pop('client_request_id', None) + request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) + + # def on_response(self, request, response): + # # raise exception if the echoed client request id from the service is not identical to the one we sent + # if self.request_id_header_name in response.http_response.headers: + + # client_request_id = request.http_request.headers.get(self.request_id_header_name) + + # if response.http_response.headers[self.request_id_header_name] != client_request_id: + # raise AzureError( + # "Echoed client request ID: {} does not match sent client request ID: {}. " + # "Service request ID: {}".format( + # response.http_response.headers[self.request_id_header_name], client_request_id, + # response.http_response.headers['x-ms-request-id']), + # response=response.http_response + # ) + + +class StorageHosts(SansIOHTTPPolicy): + + def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument + self.hosts = hosts + super(StorageHosts, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + request.context.options['hosts'] = self.hosts + parsed_url = urlparse(request.http_request.url) + + # Detect what location mode we're currently requesting with + location_mode = LocationMode.PRIMARY + for key, value in self.hosts.items(): + if parsed_url.netloc == value: + location_mode = key + + # See if a specific location mode has been specified, and if so, redirect + use_location = request.context.options.pop('use_location', None) + if use_location: + # Lock retries to the specific location + request.context.options['retry_to_secondary'] = False + if use_location not in self.hosts: + raise ValueError("Attempting to use undefined host location {}".format(use_location)) + if use_location != location_mode: + # Update request URL to use the specified location + updated = parsed_url._replace(netloc=self.hosts[use_location]) + request.http_request.url = updated.geturl() + location_mode = use_location + + request.context.options['location_mode'] = location_mode + + +class StorageLoggingPolicy(NetworkTraceLoggingPolicy): + """A policy that logs HTTP request and response to the DEBUG logger. 
+ + This accepts both global configuration, and per-request level with "enable_http_logger" + """ + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + http_request = request.http_request + options = request.context.options + if options.pop("logging_enable", self.enable_http_logger): + request.context["logging_enable"] = True + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + log_url = http_request.url + query_params = http_request.query + if 'sig' in query_params: + log_url = log_url.replace(query_params['sig'], "sig=*****") + _LOGGER.debug("Request URL: %r", log_url) + _LOGGER.debug("Request method: %r", http_request.method) + _LOGGER.debug("Request headers:") + for header, value in http_request.headers.items(): + if header.lower() == 'authorization': + value = '*****' + elif header.lower() == 'x-ms-copy-source' and 'sig' in value: + # take the url apart and scrub away the signed signature + scheme, netloc, path, params, query, fragment = urlparse(value) + parsed_qs = dict(parse_qsl(query)) + parsed_qs['sig'] = '*****' + + # the SAS needs to be put back together + value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) + + _LOGGER.debug(" %r: %r", header, value) + _LOGGER.debug("Request body:") + + # We don't want to log the binary data of a file upload. + if isinstance(http_request.body, types.GeneratorType): + _LOGGER.debug("File upload") + else: + _LOGGER.debug(str(http_request.body)) + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log request: %r", err) + + def on_response(self, request, response): + # type: (PipelineRequest, PipelineResponse, Any) -> None + if response.context.pop("logging_enable", self.enable_http_logger): + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + _LOGGER.debug("Response status: %r", response.http_response.status_code) + _LOGGER.debug("Response headers:") + for res_header, value in response.http_response.headers.items(): + _LOGGER.debug(" %r: %r", res_header, value) + + # We don't want to log binary data if the response is a file. 
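+                # Editorial note (added comment, assuming the per-operation keyword
+                # plumbing shown in on_request above): callers opt into this DEBUG
+                # logging per request by passing logging_enable=True to any client
+                # operation; the client and method below are illustrative only:
+                #
+                #     file_client.download_file(logging_enable=True)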
+ _LOGGER.debug("Response content:") + pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) + header = response.http_response.headers.get('content-disposition') + + if header and pattern.match(header): + filename = header.partition('=')[2] + _LOGGER.debug("File attachments: %s", filename) + elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): + _LOGGER.debug("Body contains binary data.") + elif response.http_response.headers.get("content-type", "").startswith("image"): + _LOGGER.debug("Body contains image data.") + else: + if response.context.options.get('stream', False): + _LOGGER.debug("Body is streamable") + else: + _LOGGER.debug(response.http_response.text()) + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log response: %s", repr(err)) + + +class StorageRequestHook(SansIOHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._request_callback = kwargs.get('raw_request_hook') + super(StorageRequestHook, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, **Any) -> PipelineResponse + request_callback = request.context.options.pop('raw_request_hook', self._request_callback) + if request_callback: + request_callback(request) + + +class StorageResponseHook(HTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(StorageResponseHook, self).__init__() + + def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = self.next.send(request) + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + response_callback(response) + request.context['response_callback'] = response_callback + return response + + +class StorageContentValidation(SansIOHTTPPolicy): + """A simple policy that sends the given headers + with the request. + + This will overwrite any headers already defined in the request. 
+ """ + header_name = 'Content-MD5' + + def __init__(self, **kwargs): # pylint: disable=unused-argument + super(StorageContentValidation, self).__init__() + + @staticmethod + def get_content_md5(data): + md5 = hashlib.md5() # nosec + if isinstance(data, bytes): + md5.update(data) + elif hasattr(data, 'read'): + pos = 0 + try: + pos = data.tell() + except: # pylint: disable=bare-except + pass + for chunk in iter(lambda: data.read(4096), b""): + md5.update(chunk) + try: + data.seek(pos, SEEK_SET) + except (AttributeError, IOError): + raise ValueError("Data should be bytes or a seekable file-like object.") + else: + raise ValueError("Data should be bytes or a seekable file-like object.") + + return md5.digest() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + validate_content = request.context.options.pop('validate_content', False) + if validate_content and request.http_request.method != 'GET': + computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) + request.http_request.headers[self.header_name] = computed_md5 + request.context['validate_content_md5'] = computed_md5 + request.context['validate_content'] = validate_content + + def on_response(self, request, response): + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = request.context.get('validate_content_md5') or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + raise AzureError( + 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( + response.http_response.headers['content-md5'], computed_md5), + response=response.http_response + ) + + +class StorageRetryPolicy(HTTPPolicy): + """ + The base class for Exponential and Linear retries containing shared code. + """ + + def __init__(self, **kwargs): + self.total_retries = kwargs.pop('retry_total', 10) + self.connect_retries = kwargs.pop('retry_connect', 3) + self.read_retries = kwargs.pop('retry_read', 3) + self.status_retries = kwargs.pop('retry_status', 3) + self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) + super(StorageRetryPolicy, self).__init__() + + def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use + """ + A function which sets the next host location on the request, if applicable. + + :param ~azure.storage.models.RetryContext context: + The retry context containing the previous host location and the request + to evaluate and possibly modify. 
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None): + """Increment the retry counters. + + :param response: A pipeline response object. + :param error: An error encountered during the request, or + None if the response was received successfully. + + :return: Whether the retry attempts are exhausted. + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, then retry would not work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if body is not seekable, then retry would not work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies_async.py
new file mode 100644
index 0000000..e0926b8
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import random +import logging +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline.policies import AsyncHTTPPolicy +from azure.core.exceptions import AzureError + +from .policies import is_retry, StorageRetryPolicy + +if TYPE_CHECKING: + from azure.core.pipeline import PipelineRequest, PipelineResponse + + +_LOGGER = logging.getLogger(__name__) + + +async def retry_hook(settings, **kwargs): + if settings['hook']: + if asyncio.iscoroutine(settings['hook']): + await settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + else: + settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + + +class AsyncStorageResponseHook(AsyncHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(AsyncStorageResponseHook, self).__init__() + + async def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = await self.next.send(request) + await response.http_response.load_body() + + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + if asyncio.iscoroutine(response_callback): + await response_callback(response) + else: + response_callback(response) + request.context['response_callback'] = response_callback + return response + +class AsyncStorageRetryPolicy(StorageRetryPolicy): + """ + The base class for Exponential and Linear retries containing shared code. 
+ """ + + async def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + await transport.sleep(backoff) + + async def send(self, request): + retries_remaining = True + response = None + retry_settings = self.configure_retries(request) + while retries_remaining: + try: + response = await self.next.send(request) + if is_retry(response, retry_settings['mode']): + retries_remaining = self.increment( + retry_settings, + request=request.http_request, + response=response.http_response) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=response.http_response, + error=None) + await self.sleep(retry_settings, request.context.transport) + continue + break + except AzureError as err: + retries_remaining = self.increment( + retry_settings, request=request.http_request, error=err) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=None, + error=err) + await self.sleep(retry_settings, request.context.transport) + continue + raise err + if retry_settings['history']: + response.context['history'] = retry_settings['history'] + response.http_response.location_mode = retry_settings['mode'] + return response + + +class ExponentialRetry(AsyncStorageRetryPolicy): + """Exponential retry.""" + + def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, + retry_to_secondary=False, random_jitter_range=3, **kwargs): + ''' + Constructs an Exponential retry object. The initial_backoff is used for + the first retry. Subsequent retries are retried after initial_backoff + + increment_power^retry_count seconds. For example, by default the first retry + occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the + third after (15+3^2) = 24 seconds. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + ''' + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + super(ExponentialRetry, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings): + """ + Calculates how long to sleep before retrying. + + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. 
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/request_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/request_handlers.py
new file mode 100644
index 0000000..4f15b65
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/request_handlers.py
@@ -0,0 +1,147 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
+ """ + if not attr: + return None + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, + utc.tm_hour, utc.tm_min, utc.tm_sec) + return date + 'Z' + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(ValueError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + +def get_length(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: # pylint: disable=bare-except + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + try: + return fstat(fileno).st_size + except OSError: + # Not a valid fileno, may be possible requests returned + # a socket number? + pass + + # If the stream is seekable and tell() is implemented, calculate the stream size. + try: + current_position = data.tell() + data.seek(0, SEEK_END) + length = data.tell() - current_position + data.seek(current_position, SEEK_SET) + except (AttributeError, UnsupportedOperation): + pass + + return length + + +def read_length(data): + try: + if hasattr(data, 'read'): + read_data = b'' + for chunk in iter(lambda: data.read(4096), b""): + read_data += chunk + return len(read_data), read_data + if hasattr(data, '__iter__'): + read_data = b'' + for chunk in data: + read_data += chunk + return len(read_data), read_data + except: # pylint: disable=bare-except + pass + raise ValueError("Unable to calculate content length, please specify.") + + +def validate_and_format_range_headers( + start_range, end_range, start_range_required=True, + end_range_required=True, check_content_md5=False, align_to_page=False): + # If end range is provided, start range must be provided + if (start_range_required or end_range is not None) and start_range is None: + raise ValueError("start_range value cannot be None.") + if end_range_required and end_range is None: + raise ValueError("end_range value cannot be None.") + + # Page ranges must be 512 aligned + if align_to_page: + if start_range is not None and start_range % 512 != 0: + raise ValueError("Invalid page blob start_range: {0}. " + "The size must be aligned to a 512-byte boundary.".format(start_range)) + if end_range is not None and end_range % 512 != 511: + raise ValueError("Invalid page blob end_range: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(end_range)) + + # Format based on whether end_range is present + range_header = None + if end_range is not None: + range_header = 'bytes={0}-{1}'.format(start_range, end_range) + elif start_range is not None: + range_header = "bytes={0}-".format(start_range) + + # Content MD5 can only be provided for a complete range less than 4MB in size + range_validation = None + if check_content_md5: + if start_range is None or end_range is None: + raise ValueError("Both start and end range requied for MD5 content validation.") + if end_range - start_range > 4 * 1024 * 1024: + raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") + range_validation = 'true' + + return range_header, range_validation + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> Dict[str, str] + headers = {} + if metadata: + for key, value in metadata.items(): + headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value + return headers diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/response_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/response_handlers.py new file mode 100644 index 0000000..ac526e5 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/response_handlers.py @@ -0,0 +1,159 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging + +from azure.core.pipeline.policies import ContentDecodePolicy +from azure.core.exceptions import ( + HttpResponseError, + ResourceNotFoundError, + ResourceModifiedError, + ResourceExistsError, + ClientAuthenticationError, + DecodeError) + +from .parser import _to_utc_datetime +from .models import StorageErrorCode, UserDelegationKey, get_enum_value + + +if TYPE_CHECKING: + from datetime import datetime + from azure.core.exceptions import AzureError + + +_LOGGER = logging.getLogger(__name__) + + +class PartialBatchErrorException(HttpResponseError): + """There is a partial failure in batch operations. + + :param str message: The message of the exception. + :param response: Server response to be deserialized. + :param list parts: A list of the parts in multipart response. 
+ """ + + def __init__(self, message, response, parts): + self.parts = parts + super(PartialBatchErrorException, self).__init__(message=message, response=response) + + +def parse_length_from_content_range(content_range): + ''' + Parses the blob length from the content range header: bytes 1-3/65537 + ''' + if content_range is None: + return None + + # First, split in space and take the second half: '1-3/65537' + # Next, split on slash and take the second half: '65537' + # Finally, convert to an int: 65537 + return int(content_range.split(' ', 1)[1].split('/', 1)[1]) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = get_enum_value(value) + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + +def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers), deserialized + + +def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return response.location_mode, deserialized + + +def process_storage_error(storage_error): + raise_error = HttpResponseError + error_code = storage_error.response.headers.get('x-ms-error-code') + error_message = storage_error.message + additional_data = {} + try: + error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) + if error_body: + for info in error_body.iter(): + if info.tag.lower() == 'code': + error_code = info.text + elif info.tag.lower() == 'message': + error_message = info.text + else: + additional_data[info.tag] = info.text + except DecodeError: + pass + + try: + if error_code: + error_code = StorageErrorCode(error_code) + if error_code in [StorageErrorCode.condition_not_met, + StorageErrorCode.blob_overwritten]: + raise_error = ResourceModifiedError + if error_code in [StorageErrorCode.invalid_authentication_info, + StorageErrorCode.authentication_failed]: + raise_error = ClientAuthenticationError + if error_code in [StorageErrorCode.resource_not_found, + StorageErrorCode.cannot_verify_copy_source, + StorageErrorCode.blob_not_found, + StorageErrorCode.queue_not_found, + StorageErrorCode.container_not_found, + StorageErrorCode.parent_not_found, + StorageErrorCode.share_not_found]: + raise_error = ResourceNotFoundError + if error_code in [StorageErrorCode.account_already_exists, + StorageErrorCode.account_being_created, + StorageErrorCode.resource_already_exists, + StorageErrorCode.resource_type_mismatch, + StorageErrorCode.blob_already_exists, + StorageErrorCode.queue_already_exists, + StorageErrorCode.container_already_exists, + StorageErrorCode.container_being_deleted, + StorageErrorCode.queue_being_deleted, + StorageErrorCode.share_already_exists, + StorageErrorCode.share_being_deleted]: + raise_error = ResourceExistsError + except ValueError: + # Got an unknown error code + pass + + try: + error_message += "\nErrorCode:{}".format(error_code.value) + except AttributeError: + error_message += "\nErrorCode:{}".format(error_code) + for name, info in additional_data.items(): + error_message 
+= "\n{}:{}".format(name, info) + + error = raise_error(message=error_message, response=storage_error.response) + error.error_code = error_code + error.additional_info = additional_data + raise error + + +def parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/shared_access_signature.py new file mode 100644 index 0000000..07aad5f --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/shared_access_signature.py @@ -0,0 +1,220 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . 
import sign_string, url_quote
+
+
+class QueryStringConstants(object):
+    SIGNED_SIGNATURE = 'sig'
+    SIGNED_PERMISSION = 'sp'
+    SIGNED_START = 'st'
+    SIGNED_EXPIRY = 'se'
+    SIGNED_RESOURCE = 'sr'
+    SIGNED_IDENTIFIER = 'si'
+    SIGNED_IP = 'sip'
+    SIGNED_PROTOCOL = 'spr'
+    SIGNED_VERSION = 'sv'
+    SIGNED_CACHE_CONTROL = 'rscc'
+    SIGNED_CONTENT_DISPOSITION = 'rscd'
+    SIGNED_CONTENT_ENCODING = 'rsce'
+    SIGNED_CONTENT_LANGUAGE = 'rscl'
+    SIGNED_CONTENT_TYPE = 'rsct'
+    START_PK = 'spk'
+    START_RK = 'srk'
+    END_PK = 'epk'
+    END_RK = 'erk'
+    SIGNED_RESOURCE_TYPES = 'srt'
+    SIGNED_SERVICES = 'ss'
+    SIGNED_OID = 'skoid'
+    SIGNED_TID = 'sktid'
+    SIGNED_KEY_START = 'skt'
+    SIGNED_KEY_EXPIRY = 'ske'
+    SIGNED_KEY_SERVICE = 'sks'
+    SIGNED_KEY_VERSION = 'skv'
+
+    # for ADLS
+    SIGNED_AUTHORIZED_OID = 'saoid'
+    SIGNED_UNAUTHORIZED_OID = 'suoid'
+    SIGNED_CORRELATION_ID = 'scid'
+    SIGNED_DIRECTORY_DEPTH = 'sdd'
+
+    @staticmethod
+    def to_list():
+        return [
+            QueryStringConstants.SIGNED_SIGNATURE,
+            QueryStringConstants.SIGNED_PERMISSION,
+            QueryStringConstants.SIGNED_START,
+            QueryStringConstants.SIGNED_EXPIRY,
+            QueryStringConstants.SIGNED_RESOURCE,
+            QueryStringConstants.SIGNED_IDENTIFIER,
+            QueryStringConstants.SIGNED_IP,
+            QueryStringConstants.SIGNED_PROTOCOL,
+            QueryStringConstants.SIGNED_VERSION,
+            QueryStringConstants.SIGNED_CACHE_CONTROL,
+            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+            QueryStringConstants.SIGNED_CONTENT_ENCODING,
+            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+            QueryStringConstants.SIGNED_CONTENT_TYPE,
+            QueryStringConstants.START_PK,
+            QueryStringConstants.START_RK,
+            QueryStringConstants.END_PK,
+            QueryStringConstants.END_RK,
+            QueryStringConstants.SIGNED_RESOURCE_TYPES,
+            QueryStringConstants.SIGNED_SERVICES,
+            QueryStringConstants.SIGNED_OID,
+            QueryStringConstants.SIGNED_TID,
+            QueryStringConstants.SIGNED_KEY_START,
+            QueryStringConstants.SIGNED_KEY_EXPIRY,
+            QueryStringConstants.SIGNED_KEY_SERVICE,
+            QueryStringConstants.SIGNED_KEY_VERSION,
+            # for ADLS
+            QueryStringConstants.SIGNED_AUTHORIZED_OID,
+            QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+            QueryStringConstants.SIGNED_CORRELATION_ID,
+            QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(self, services, resource_types, permission, expiry, start=None,
+                         ip=None, protocol=None):
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param Services services:
+            Specifies the services accessible with the account SAS.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature.
The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. You can combine + values to provide more than one permission. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, 
content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION)) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads.py new file mode 100644 index 0000000..abf3fb2 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads.py @@ -0,0 +1,550 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from concurrent import futures +from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) +from threading import Lock +from itertools import islice +from math import ceil + +import six + +from azure.core.tracing.common import with_current_context + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
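+
+# Note on the pattern used by the helpers below: uploads are scheduled as a
+# bounded producer/consumer loop. At most max_concurrency chunk futures are
+# in flight at once; as each batch of futures completes, the same number of
+# new chunks is pulled from the lazy `pending` iterator, so memory use scales
+# with the concurrency level rather than with the total size of the upload.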
+ + +def _parallel_uploads(executor, uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(executor.submit(with_current_context(uploader), next_chunk)) + except StopIteration: + break + + # Wait for the remaining uploads to finish + done, _running = futures.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + validate_content=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + validate_content=validate_content, + **kwargs) + if parallel: + executor = futures.ThreadPoolExecutor(max_concurrency) + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + executor.submit(with_current_context(uploader.process_chunk), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + executor = futures.ThreadPoolExecutor(max_concurrency) + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + executor.submit(with_current_context(uploader.process_substream_block), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] + return sorted(range_ids) + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = Lock() if parallel else None + + # Progress feedback + 
self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def _upload_substream_block(self, block_id, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, block_id, block_stream): + range_id = self._upload_substream_block(block_id, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. 
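+        # The index is the chunk's byte offset, zero-padded to 32 digits and
+        # then double-base64-encoded to form the block id; for example, a
+        # chunk at offset 4194304 yields index
+        # '00000000000000000000000004194304'. upload_data_chunks later sorts
+        # the returned (index, block_id) pairs by index, which restores byte
+        # order even though the ids encode offsets rather than sequence
+        # numbers.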
+ index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return index, block_id + + def _upload_substream_block(self, block_id, block_stream): + try: + self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + return not any(bytearray(chunk_data)) + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = self.service.upload_pages( + chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + self.current_length = int(self.response_headers["blob_append_offset"]) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response + + +class SubStream(IOBase): + + def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): + # Python 2.7: file-like objects created with open() typically support seek(), but are not + # derivations of io.IOBase and thus do not implement seekable(). + # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
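+        # In either case, probing with a relative seek of zero below detects
+        # seek() support without relying on seekable(), and is a no-op on any
+        # stream that does support seeking.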
+        try:
+            # only the main thread runs this, so there's no need to grab the lock
+            wrapped_stream.seek(0, SEEK_CUR)
+        except:  # pylint: disable=bare-except
+            raise ValueError("Wrapped stream must support seek().")
+
+        self._lock = lockObj
+        self._wrapped_stream = wrapped_stream
+        self._position = 0
+        self._stream_begin_index = stream_begin_index
+        self._length = length
+        self._buffer = BytesIO()
+
+        # we must avoid buffering more than necessary, and also not use up too much memory
+        # so the max buffer size is capped at 4MB
+        self._max_buffer_size = (
+            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+        )
+        self._current_buffer_start = 0
+        self._current_buffer_size = 0
+        super(SubStream, self).__init__()
+
+    def __len__(self):
+        return self._length
+
+    def close(self):
+        if self._buffer:
+            self._buffer.close()
+        self._wrapped_stream = None
+        IOBase.close(self)
+
+    def fileno(self):
+        return self._wrapped_stream.fileno()
+
+    def flush(self):
+        pass
+
+    def read(self, size=None):
+        if self.closed:  # pylint: disable=using-constant-test
+            raise ValueError("Stream is closed.")
+
+        if size is None:
+            size = self._length - self._position
+
+        # adjust if out of bounds
+        if size + self._position >= self._length:
+            size = self._length - self._position
+
+        # return fast
+        if size == 0 or self._buffer.closed:
+            return b""
+
+        # attempt first read from the read buffer and update position
+        read_buffer = self._buffer.read(size)
+        bytes_read = len(read_buffer)
+        bytes_remaining = size - bytes_read
+        self._position += bytes_read
+
+        # repopulate the read buffer from the underlying stream to fulfill the request
+        # ensure the seek and read operations are done atomically (only if a lock is provided)
+        if bytes_remaining > 0:
+            with self._buffer:
+                # either read in the max buffer size specified on the class
+                # or read in just enough data for the current block/sub stream
+                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+                # lock is only defined if max_concurrency > 1 (parallel uploads)
+                if self._lock:
+                    with self._lock:
+                        # reposition the underlying stream to match the start of the data to read
+                        absolute_position = self._stream_begin_index + self._position
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+                        # If we can't seek to the right location, our read will be corrupted so fail fast.
+                        if self._wrapped_stream.tell() != absolute_position:
+                            raise IOError("Stream failed to seek to the desired location.")
+                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+                else:
+                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+                if buffer_from_stream:
+                    # update the buffer with new data from the wrapped stream
+                    # we need to note down the start position and size of the buffer, in case seek is performed later
+                    self._buffer = BytesIO(buffer_from_stream)
+                    self._current_buffer_start = self._position
+                    self._current_buffer_size = len(buffer_from_stream)
+
+                    # read the remaining bytes from the new buffer and update position
+                    second_read_buffer = self._buffer.read(bytes_remaining)
+                    read_buffer += second_read_buffer
+                    self._position += len(second_read_buffer)
+
+        return read_buffer
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        raise UnsupportedOperation
+
+    def seek(self, offset, whence=0):
+        if whence is SEEK_SET:
+            start_index = 0
+        elif whence is SEEK_CUR:
+            start_index = self._position
+        elif whence is SEEK_END:
+            start_index = self._length
+            offset = -offset
+        else:
+            raise ValueError("Invalid argument for the 'whence' parameter.")
+
+        pos = start_index + offset
+
+        if pos > self._length:
+            pos = self._length
+        elif pos < 0:
+            pos = 0
+
+        # check if buffer is still valid
+        # if not, drop buffer
+        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+            self._buffer.close()
+            self._buffer = BytesIO()
+        else:  # if yes seek to correct position
+            delta = pos - self._current_buffer_start
+            self._buffer.seek(delta, SEEK_SET)
+
+        self._position = pos
+        return pos
+
+    def seekable(self):
+        return True
+
+    def tell(self):
+        return self._position
+
+    def write(self, *args):
+        raise UnsupportedOperation
+
+    def writelines(self, *args):
+        raise UnsupportedOperation
+
+    def writable(self):
+        return False
+
+
+class IterStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator, encoding="UTF-8"):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self.leftover = b""
+        self.encoding = encoding
+
+    def __len__(self):
+        return self.generator.__len__()
+
+    def __iter__(self):
+        return self.iterator
+
+    def seekable(self):
+        return False
+
+    def __next__(self):
+        return next(self.iterator)
+
+    next = __next__  # Python 2 compatibility.
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is unseekable.")
+
+    def read(self, size):
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = self.__next__()
+                if isinstance(chunk, six.text_type):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        except StopIteration:
+            pass
+
+        if count > size:
+            self.leftover = data[size:]
+
+        return data[:size]
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads_async.py
new file mode 100644
index 0000000..f6a8725
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads_async.py
@@ -0,0 +1,351 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +import asyncio +from asyncio import Lock +from itertools import islice +import threading + +from math import ceil + +import six + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder +from .uploads import SubStream, IterStreamer # pylint: disable=unused-import + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' + + +async def _parallel_uploads(uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(asyncio.ensure_future(uploader(next_chunk))) + except StopIteration: + break + + # Wait for the remaining uploads to finish + if running: + done, _running = await asyncio.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +async def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + asyncio.ensure_future(uploader.process_chunk(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [] + for chunk in uploader.get_chunk_streams(): + range_ids.append(await uploader.process_chunk(chunk)) + + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +async def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + asyncio.ensure_future(uploader.process_substream_block(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [] + for block in uploader.get_substream_blocks(): + range_ids.append(await 
uploader.process_substream_block(block)) + return sorted(range_ids) + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, block_id, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def 
_upload_substream_block_with_progress(self, block_id, block_stream): + range_id = await self._upload_substream_block(block_id, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, block_id, block_stream): + try: + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = await self.service.upload_pages( + chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + 
length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = await self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
+        return range_id, response
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared_access_signature.py
new file mode 100644
index 0000000..20dad95
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared_access_signature.py
@@ -0,0 +1,491 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, List, TYPE_CHECKING
+)
+
+from ._shared import sign_string
+from ._shared.constants import X_MS_VERSION
+from ._shared.models import Services
+from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants
+from ._shared.parser import _str
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from . import (
+        ResourceTypes,
+        AccountSasPermissions,
+        ShareSasPermissions,
+        FileSasPermissions
+    )
+
+
+class FileSharedAccessSignature(SharedAccessSignature):
+    '''
+    Provides a factory for creating file and share access
+    signature tokens with a common account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        '''
+        super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
+
+    def generate_file(self, share_name, directory_name=None, file_name=None,
+                      permission=None, expiry=None, start=None, policy_id=None,
+                      ip=None, protocol=None, cache_control=None,
+                      content_disposition=None, content_encoding=None,
+                      content_language=None, content_type=None):
+        '''
+        Generates a shared access signature for the file.
+        Use the returned signature with the sas_token parameter of FileService.
+
+        :param str share_name:
+            Name of share.
+        :param str directory_name:
+            Name of directory. SAS tokens cannot be created for directories, so
+            this parameter should only be present if file_name is provided.
+        :param str file_name:
+            Name of file.
+        :param ~azure.storage.fileshare.FileSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Permissions must be ordered read, create, write, delete, list.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy.
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field.
This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. Azure will always convert values
+            to UTC. If a date is passed in without timezone info, it is assumed to
+            be UTC.
+        :type start: datetime or str
+        :param str policy_id:
+            A unique value up to 64 characters in length that correlates to a
+            stored access policy. To create a stored access policy, use
+            set_file_service_properties.
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :param str cache_control:
+            Response header value for Cache-Control when resource is accessed
+            using this shared access signature.
+        :param str content_disposition:
+            Response header value for Content-Disposition when resource is accessed
+            using this shared access signature.
+        :param str content_encoding:
+            Response header value for Content-Encoding when resource is accessed
+            using this shared access signature.
+        :param str content_language:
+            Response header value for Content-Language when resource is accessed
+            using this shared access signature.
+        :param str content_type:
+            Response header value for Content-Type when resource is accessed
+            using this shared access signature.
+        '''
+        resource_path = share_name
+        if directory_name is not None:
+            resource_path += '/' + _str(directory_name)
+        if file_name is not None:
+            resource_path += '/' + _str(file_name)
+
+        sas = _FileSharedAccessHelper()
+        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+        sas.add_id(policy_id)
+        sas.add_resource('f')
+        sas.add_override_response_headers(cache_control, content_disposition,
+                                          content_encoding, content_language,
+                                          content_type)
+        sas.add_resource_signature(self.account_name, self.account_key, resource_path)
+
+        return sas.get_token()
+
+    def generate_share(self, share_name, permission=None, expiry=None,
+                       start=None, policy_id=None, ip=None, protocol=None,
+                       cache_control=None, content_disposition=None,
+                       content_encoding=None, content_language=None,
+                       content_type=None):
+        '''
+        Generates a shared access signature for the share.
+        Use the returned signature with the sas_token parameter of FileService.
+
+        :param str share_name:
+            Name of share.
+        :param ShareSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Permissions must be ordered read, create, write, delete, list.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy.
+ :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_file_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + sas = _FileSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + sas.add_resource('s') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_resource_signature(self.account_name, self.account_key, share_name) + + return sas.get_token() + + +class _FileSharedAccessHelper(_SharedAccessHelper): + + def add_resource_signature(self, account_name, account_key, path): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/file/' + account_name + path + '\n' + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. 
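+        # For a file service SAS the signed fields are, in order: permissions
+        # (sp), start (st), expiry (se), the canonicalized resource, stored
+        # policy identifier (si), IP range (sip), protocol (spr), service
+        # version (sv), and the five response-header overrides (rscc, rscd,
+        # rsce, rscl, rsct), each field terminated by a newline.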
+ string_to_sign = \ + (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + canonicalized_resource + + get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION) + + get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) + + # remove the trailing newline + if string_to_sign[-1] == '\n': + string_to_sign = string_to_sign[:-1] + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + +def generate_account_sas( + account_name, # type: str + account_key, # type: str + resource_types, # type: Union[ResourceTypes, str] + permission, # type: Union[AccountSasPermissions, str] + expiry, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for the file service. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param ~azure.storage.fileshare.ResourceTypes resource_types: + Specifies the resource types that are accessible with the account SAS. + :param ~azure.storage.fileshare.AccountSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. 
+ :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START generate_sas_token] + :end-before: [END generate_sas_token] + :language: python + :dedent: 8 + :caption: Generate a sas token. + """ + sas = SharedAccessSignature(account_name, account_key) + return sas.generate_account( + services=Services(fileshare=True), + resource_types=resource_types, + permission=permission, + expiry=expiry, + start=start, + ip=ip, + **kwargs + ) # type: ignore + + +def generate_share_sas( + account_name, # type: str + share_name, # type: str + account_key, # type: str + permission=None, # type: Optional[Union[ShareSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): # type: (...) -> str + """Generates a shared access signature for a share. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str share_name: + The name of the share. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param ~azure.storage.fileshare.ShareSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, create, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. 
+ :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + sas = FileSharedAccessSignature(account_name, account_key) + return sas.generate_share( + share_name=share_name, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) + + +def generate_file_sas( + account_name, # type: str + share_name, # type: str + file_path, # type: List[str] + account_key, # type: str + permission=None, # type: Optional[Union[FileSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for a file. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str share_name: + The name of the share. + :param file_path: + The file path represented as a list of path segments, including the file name. + :type file_path: List[str] + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param ~azure.storage.fileshare.FileSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. 
+ If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + sas = FileSharedAccessSignature(account_name, account_key) + if len(file_path) > 1: + dir_path = '/'.join(file_path[:-1]) + else: + dir_path = None # type: ignore + return sas.generate_file( # type: ignore + share_name=share_name, + directory_name=dir_path, + file_name=file_path[-1], + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_version.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_version.py new file mode 100644 index 0000000..3174c50 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/_version.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +VERSION = "12.3.0b1" diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/__init__.py new file mode 100644 index 0000000..73393b8 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/__init__.py @@ -0,0 +1,20 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from ._file_client_async import ShareFileClient +from ._directory_client_async import ShareDirectoryClient +from ._share_client_async import ShareClient +from ._share_service_client_async import ShareServiceClient +from ._lease_async import ShareLeaseClient + + +__all__ = [ + 'ShareFileClient', + 'ShareDirectoryClient', + 'ShareClient', + 'ShareServiceClient', + 'ShareLeaseClient', +] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_directory_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_directory_client_async.py new file mode 100644 index 0000000..29b6396 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_directory_client_async.py @@ -0,0 +1,593 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +import time +from typing import ( # pylint: disable=unused-import + Optional, Union, Any, Dict, TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.pipeline import AsyncPipeline +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from .._parser import _get_file_permission, _datetime_to_str +from .._shared.parser import _str + +from .._generated.aio import AzureFileStorage +from .._generated.version import VERSION +from .._generated.models import StorageErrorException +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.request_handlers import add_metadata_headers +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import deserialize_directory_properties +from .._serialize import get_api_version +from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase +from ._file_client_async import ShareFileClient +from ._models import DirectoryPropertiesPaged, HandlesPaged + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ShareProperties, DirectoryProperties, ContentSettings, NTFSAttributes + from .._generated.models import HandleItem + + +class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase): + """A client to interact with a specific directory, although it may not yet exist. + + For operations relating to a specific subdirectory or file in this share, the clients for those + entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the directory, + use the :func:`from_directory_url` classmethod. + :param share_name: + The name of the share for the directory. + :type share_name: str + :param str directory_path: + The directory path for the directory with which to interact. + If specified, this value will override a directory value specified in the directory URL. + :param str snapshot: + An optional share snapshot on which to operate. 
This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword loop: + The event loop to run the asynchronous tasks. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + directory_path, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Optional[Any] + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + loop = kwargs.pop('loop', None) + super(ShareDirectoryClient, self).__init__( + account_url, + share_name=share_name, + directory_path=directory_path, + snapshot=snapshot, + credential=credential, + loop=loop, + **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + self._loop = loop + + def get_file_client(self, file_name, **kwargs): + # type: (str, Any) -> ShareFileClient + """Get a client to interact with a specific file. + + The file need not already exist. + + :param str file_name: + The name of the file. + :returns: A File Client. + :rtype: ~azure.storage.fileshare.ShareFileClient + """ + if self.directory_path: + file_name = self.directory_path.rstrip('/') + "/" + file_name + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareFileClient( + self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) + + def get_subdirectory_client(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Get a client to interact with a specific subdirectory. + + The subdirectory need not already exist. + + :param str directory_name: + The name of the subdirectory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START get_subdirectory_client] + :end-before: [END get_subdirectory_client] + :language: python + :dedent: 16 + :caption: Gets the subdirectory client. 
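To make the factory-method hierarchy concrete — a hypothetical sketch with placeholder names, assuming the public azure.storage.fileshare.aio import path — subdirectory and file clients can be chained without any service calls:

    from azure.storage.fileshare.aio import ShareDirectoryClient

    # All names below are illustrative placeholders.
    parent = ShareDirectoryClient(
        account_url="https://<account-name>.file.core.windows.net",
        share_name="myshare",
        directory_path="parent",
        credential="<sas-token>",
    )
    reports = parent.get_subdirectory_client("reports")  # builds a client; no network call
    summary = reports.get_file_client("summary.csv")     # builds a client; no network call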
+ """ + directory_path = self.directory_path.rstrip('/') + "/" + directory_name + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) + + @distributed_trace_async + async def create_directory(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new directory under the directory referenced by the client. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the directory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 16 + :caption: Creates a directory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return await self._client.directory.create( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def delete_directory(self, **kwargs): + # type: (**Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_directory] + :end-before: [END delete_directory] + :language: python + :dedent: 16 + :caption: Deletes a directory. + """ + timeout = kwargs.pop('timeout', None) + try: + await self._client.directory.delete(timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files(self, name_starts_with=None, **kwargs): + # type: (Optional[str], Any) -> AsyncItemPaged + """Lists all the directories and files under the directory. + + :param str name_starts_with: + Filters the results to return only entities whose names + begin with the specified prefix. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START lists_directory] + :end-before: [END lists_directory] + :language: python + :dedent: 16 + :caption: List directories and files. 
+ """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_files_and_directories_segment, + sharesnapshot=self.snapshot, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=DirectoryPropertiesPaged) + + @distributed_trace + def list_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> AsyncItemPaged + """Lists opened handles on a directory or a file under the directory. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of HandleItem + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_handles, + sharesnapshot=self.snapshot, + timeout=timeout, + recursive=recursive, + **kwargs) + return AsyncItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=HandlesPaged) + + @distributed_trace_async + async def close_handle(self, handle, **kwargs): + # type: (Union[str, HandleItem], Any) -> Dict[str, int] + """Close an open file handle. + + :param handle: + A specific handle to close. + :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = await self._client.directory.force_close_handles( + handle_id, + marker=None, + recursive=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def close_all_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. 
+ :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = await self._client.directory.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + recursive=recursive, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } + + @distributed_trace_async + async def get_directory_properties(self, **kwargs): + # type: (Any) -> DirectoryProperties + """Returns all user-defined metadata and system properties for the + specified directory. The data returned does not include the directory's + list of files. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: DirectoryProperties + :rtype: ~azure.storage.fileshare.DirectoryProperties + """ + timeout = kwargs.pop('timeout', None) + try: + response = await self._client.directory.get_properties( + timeout=timeout, + cls=deserialize_directory_properties, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return response # type: ignore + + @distributed_trace_async + async def set_directory_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the directory. + + Each call to this operation replaces all existing metadata + attached to the directory. To remove all metadata from the directory, + call this operation with an empty metadata dict. + + :param metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return await self._client.directory.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the directory. + + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. 
+ :type file_creation_time: str or datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. + :type file_last_write_time: str or datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + try: + return await self._client.directory.set_properties( # type: ignore + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def create_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> ShareDirectoryClient + """Creates a new subdirectory and returns a client to interact + with the subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword dict(str,str) metadata: + Name-value pairs associated with the subdirectory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START create_subdirectory] + :end-before: [END create_subdirectory] + :language: python + :dedent: 16 + :caption: Create a subdirectory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) + return subdir # type: ignore + + @distributed_trace_async + async def delete_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> None + """Deletes a subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_subdirectory] + :end-before: [END delete_subdirectory] + :language: python + :dedent: 16 + :caption: Delete a subdirectory. + """ + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + await subdir.delete_directory(timeout=timeout, **kwargs) + + @distributed_trace_async + async def upload_file( + self, file_name, # type: str + data, # type: Any + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) 
-> ShareFileClient + """Creates a new file in the directory and returns a ShareFileClient + to interact with the file. + + :param str file_name: + The name of the file. + :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: ShareFileClient + :rtype: ~azure.storage.fileshare.aio.ShareFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START upload_file_to_directory] + :end-before: [END upload_file_to_directory] + :language: python + :dedent: 16 + :caption: Upload a file to a directory. + """ + file_client = self.get_file_client(file_name) + await file_client.upload_file( + data, + length=length, + **kwargs) + return file_client # type: ignore + + @distributed_trace_async + async def delete_file( + self, file_name, # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> None + """Marks the specified file for deletion. The file is later + deleted during garbage collection. + + :param str file_name: + The name of the file to delete. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_file_in_directory] + :end-before: [END delete_file_in_directory] + :language: python + :dedent: 16 + :caption: Delete a file in a directory. + """ + file_client = self.get_file_client(file_name) + await file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_download_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_download_async.py new file mode 100644 index 0000000..c0db16d --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_download_async.py @@ -0,0 +1,467 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import asyncio +import sys +from io import BytesIO +from itertools import islice +import warnings + +from azure.core.exceptions import HttpResponseError +from .._shared.encryption import decrypt_blob +from .._shared.request_handlers import validate_and_format_range_headers +from .._shared.response_handlers import process_storage_error, parse_length_from_content_range +from .._download import process_range_and_offset, _ChunkDownloader + + +async def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + try: + content = data.response.body() + except Exception as error: + raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) + if encryption.get('key') is not None or encryption.get('resolver') is not None: + try: + return decrypt_blob( + encryption.get('required'), + encryption.get('key'), + encryption.get('resolver'), + content, + start_offset, + end_offset, + data.response.headers) + except Exception as error: + raise HttpResponseError( + message="Decryption failed.", + response=data.response, + error=error) + return content + + +class _AsyncChunkDownloader(_ChunkDownloader): + def __init__(self, **kwargs): + super(_AsyncChunkDownloader, self).__init__(**kwargs) + self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None + self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None + + async def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + await self._write_to_stream(chunk_data, chunk_start) + await self._update_progress(length) + + async def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return await self._download_chunk(chunk_start, chunk_end - 1) + + async def _update_progress(self, length): + if self.progress_lock: + async with self.progress_lock: # pylint: disable=not-async-context-manager + self.progress_total += length + else: + self.progress_total += length + + async def _write_to_stream(self, chunk_data, chunk_start): + if self.stream_lock: + async with self.stream_lock: # pylint: disable=not-async-context-manager + self.stream.seek(self.stream_start + (chunk_start - self.start_index)) + self.stream.write(chunk_data) + else: + self.stream.write(chunk_data) + + async def _download_chunk(self, chunk_start, chunk_end): + download_range, offset = process_range_and_offset( + chunk_start, chunk_end, chunk_end, self.encryption_options + ) + range_header, range_validation = validate_and_format_range_headers( + download_range[0], + download_range[1], + check_content_md5=self.validate_content + ) + try: + _, response = await self.client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self.validate_content, + data_stream_total=self.total_size, + download_stream_current=self.progress_total, + **self.request_options + ) + except HttpResponseError as error: + process_storage_error(error) + + chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) + return chunk_data + + +class _AsyncChunkIterator(object): + """Async iterator for chunks in file download stream.""" + + def __init__(self, size, content, downloader): + self.size = size 
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks = None
+        self._complete = (size == 0)
+
+    def __len__(self):
+        return self.size
+
+    def __iter__(self):
+        raise TypeError("Async stream must be iterated asynchronously.")
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        """Iterate through responses."""
+        if self._complete:
+            raise StopAsyncIteration("Download complete")
+        if not self._iter_downloader:
+            # If no iterator was supplied, the download completed with
+            # the initial GET, so we just return that data
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+        else:
+            try:
+                chunk = next(self._iter_chunks)
+            except StopIteration:
+                raise StopAsyncIteration("Download complete")
+            self._current_content = await self._iter_downloader.yield_chunk(chunk)
+
+        return self._current_content
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the file being downloaded.
+    :ivar str path:
+        The full path of the file.
+    :ivar str share:
+        The name of the share where the file is.
+    :ivar ~azure.storage.fileshare.FileProperties properties:
+        The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file.
+    """
+
+    def __init__(
+        self,
+        client=None,
+        config=None,
+        start_range=None,
+        end_range=None,
+        validate_content=None,
+        encryption_options=None,
+        max_concurrency=1,
+        name=None,
+        path=None,
+        share=None,
+        encoding=None,
+        **kwargs
+    ):
+        self.name = name
+        self.path = path
+        self.share = share
+        self.properties = None
+        self.size = None
+
+        self._client = client
+        self._config = config
+        self._start_range = start_range
+        self._end_range = end_range
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        self._validate_content = validate_content
+        self._encryption_options = encryption_options or {}
+        self._request_options = kwargs
+        self._location_mode = None
+        self._download_complete = False
+        self._current_content = None
+        self._file_size = None
+        self._response = None
+
+        # The service only provides transactional MD5s for chunks under 4MB.
+        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
+        # chunk so a transactional MD5 can be retrieved.
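+        # Put differently: without validation the first request asks for up to
+        # max_single_get_size in one GET; with validation it is capped at
+        # max_chunk_get_size so the service can attach a per-range MD5.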
+        self._first_get_size = self._config.max_single_get_size if not self._validate_content \
+            else self._config.max_chunk_get_size
+        initial_request_start = self._start_range if self._start_range is not None else 0
+        if self._end_range is not None and self._end_range - initial_request_start < self._first_get_size:
+            initial_request_end = self._end_range
+        else:
+            initial_request_end = initial_request_start + self._first_get_size - 1
+
+        self._initial_range, self._initial_offset = process_range_and_offset(
+            initial_request_start, initial_request_end, self._end_range, self._encryption_options
+        )
+
+    def __len__(self):
+        return self.size
+
+    async def _setup(self):
+        self._response = await self._initial_request()
+        self.properties = self._response.properties
+        self.properties.name = self.name
+        self.properties.path = self.path
+        self.properties.share = self.share
+
+        # Set the content length to the download size instead of the size of
+        # the last range
+        self.properties.size = self.size
+
+        # Overwrite the content range to the user requested range
+        self.properties.content_range = 'bytes {0}-{1}/{2}'.format(
+            self._start_range,
+            self._end_range,
+            self._file_size
+        )
+
+        # Overwrite the content MD5 as it is the MD5 for the last range instead
+        # of the stored MD5
+        # TODO: Set to the stored MD5 when the service returns this
+        self.properties.content_md5 = None
+
+        if self.size == 0:
+            self._current_content = b""
+        else:
+            self._current_content = await process_content(
+                self._response,
+                self._initial_offset[0],
+                self._initial_offset[1],
+                self._encryption_options
+            )
+
+    async def _initial_request(self):
+        range_header, range_validation = validate_and_format_range_headers(
+            self._initial_range[0],
+            self._initial_range[1],
+            start_range_required=False,
+            end_range_required=False,
+            check_content_md5=self._validate_content)
+
+        try:
+            location_mode, response = await self._client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self._validate_content,
+                data_stream_total=None,
+                download_stream_current=0,
+                **self._request_options)
+
+            # Check the location we read from to ensure we use the same one
+            # for subsequent requests.
+            self._location_mode = location_mode
+
+            # Parse the total file size and adjust the download size if ranges
+            # were specified
+            self._file_size = parse_length_from_content_range(response.properties.content_range)
+            if self._end_range is not None:
+                # Use the length unless it is over the end of the file
+                self.size = min(self._file_size, self._end_range - self._start_range + 1)
+            elif self._start_range is not None:
+                self.size = self._file_size - self._start_range
+            else:
+                self.size = self._file_size
+
+        except HttpResponseError as error:
+            if self._start_range is None and error.response.status_code == 416:
+                # Get range will fail on an empty file. If the user did not
+                # request a range, do a regular get request in order to get
+                # any properties.
+                try:
+                    _, response = await self._client.download(
+                        validate_content=self._validate_content,
+                        data_stream_total=0,
+                        download_stream_current=0,
+                        **self._request_options)
+                except HttpResponseError as error:
+                    process_storage_error(error)
+
+                # Set the download size to empty
+                self.size = 0
+                self._file_size = 0
+            else:
+                process_storage_error(error)
+
+        # If the file is small, the download is complete at this point.
+        # If file size is large, download the rest of the file in chunks.
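+        # properties.size is the byte count the initial GET actually returned;
+        # if it already equals the computed download size, no chunked
+        # follow-up requests are needed.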
+        if response.properties.size == self.size:
+            self._download_complete = True
+        return response
+
+    def chunks(self):
+        """Iterate over chunks in the download stream.
+
+        :rtype: Iterable[bytes]
+        """
+        if self.size == 0 or self._download_complete:
+            iter_downloader = None
+        else:
+            data_end = self._file_size
+            if self._end_range is not None:
+                # Use the length unless it is over the end of the file
+                data_end = min(self._file_size, self._end_range + 1)
+            iter_downloader = _AsyncChunkDownloader(
+                client=self._client,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._first_get_size,
+                start_range=self._initial_range[1] + 1,  # Start where the first download ended
+                end_range=data_end,
+                stream=None,
+                parallel=False,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                use_location=self._location_mode,
+                **self._request_options)
+        return _AsyncChunkIterator(
+            size=self.size,
+            content=self._current_content,
+            downloader=iter_downloader)
+
+    async def readall(self):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :rtype: bytes or str
+        """
+        stream = BytesIO()
+        await self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)
+        return data
+
+    async def content_as_bytes(self, max_concurrency=1):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        return await self.readall()
+
+    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """Download the contents of this file, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding to decode the downloaded bytes. Default is UTF-8.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return await self.readall()
+
+    async def readinto(self, stream):
+        """Download the contents of this file to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        # the stream must be seekable if parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
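+            # Chunks may finish out of order and are written at absolute
+            # offsets, so a parallel download needs a seekable target:
+            # probe stream.seekable() on Python 3, or fall back to attempting
+            # a no-op seek below.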
+ if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # Write the content to the user stream + stream.write(self._current_content) + if self._download_complete: + return self.size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + downloader = _AsyncChunkDownloader( + client=self._client, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # start where the first download ended + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + **self._request_options) + + dl_tasks = downloader.get_chunk_offsets() + running_futures = [ + asyncio.ensure_future(downloader.process_chunk(d)) + for d in islice(dl_tasks, 0, self._max_concurrency) + ] + while running_futures: + # Wait for some download to finish before adding a new one + _done, running_futures = await asyncio.wait( + running_futures, return_when=asyncio.FIRST_COMPLETED) + try: + next_chunk = next(dl_tasks) + except StopIteration: + break + else: + running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) + + if running_futures: + # Wait for the remaining downloads to finish + await asyncio.wait(running_futures) + return self.size + + async def download_to_stream(self, stream, max_concurrency=1): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The properties of the downloaded file. + :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + await self.readinto(stream) + return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_file_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_file_client_async.py new file mode 100644 index 0000000..d008e1b --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_file_client_async.py @@ -0,0 +1,1197 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, invalid-overridden-method, too-many-public-methods +import functools +import time +from io import BytesIO +from typing import Optional, Union, IO, List, Tuple, Dict, Any, Iterable, TYPE_CHECKING # pylint: disable=unused-import + +import six +from azure.core.async_paging import AsyncItemPaged + +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from .._parser import _datetime_to_str, _get_file_permission +from .._shared.parser import _str + +from .._generated.aio import AzureFileStorage +from .._generated.version import VERSION +from .._generated.models import StorageErrorException, FileHTTPHeaders +from .._shared.policies_async import ExponentialRetry +from .._shared.uploads_async import upload_data_chunks, FileChunkUploader, IterStreamer +from .._shared.base_client_async import AsyncStorageAccountHostsMixin +from .._shared.request_handlers import add_metadata_headers, get_length +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result +from .._serialize import get_access_conditions, get_smb_properties, get_api_version +from .._file_client import ShareFileClient as ShareFileClientBase +from ._models import HandlesPaged +from ._lease_async import ShareLeaseClient +from ._download_async import StorageStreamDownloader + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ShareProperties, ContentSettings, FileProperties, NTFSAttributes + from .._generated.models import HandleItem + + +async def _upload_file_helper( + client, + stream, + size, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + file_settings, + file_attributes="none", + file_creation_time="now", + file_last_write_time="now", + file_permission=None, + file_permission_key=None, + **kwargs +): + try: + if size is None or size < 0: + raise ValueError("A content size must be specified for a File.") + response = await client.create_file( + size, content_settings=content_settings, metadata=metadata, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + permission_key=file_permission_key, + timeout=timeout, + **kwargs + ) + if size == 0: + return response + + responses = await upload_data_chunks( + service=client, + uploader_class=FileChunkUploader, + total_size=size, + chunk_size=file_settings.max_range_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + timeout=timeout, + **kwargs + ) + return sorted(responses, key=lambda r: r.get('last_modified'))[-1] + except StorageErrorException as error: + process_storage_error(error) + + +class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase): + """A client to interact with a specific file, although that file may not yet exist. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the + file, use the :func:`from_file_url` classmethod. + :param share_name: + The name of the share for the file. + :type share_name: str + :param str file_path: + The file path to the file with which to interact. If specified, this value will override + a file value specified in the file URL. 
+    :param str snapshot:
+        An optional file snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`ShareClient.create_snapshot`.
+    :param credential:
+        The credential with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string or an account
+        shared access key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is '2019-07-07'.
+        Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword loop:
+        The event loop to run the asynchronous tasks.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    """
+
+    def __init__(  # type: ignore
+        self,
+        account_url,  # type: str
+        share_name,  # type: str
+        file_path,  # type: str
+        snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+        credential=None,  # type: Optional[Any]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        super(ShareFileClient, self).__init__(
+            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot,
+            credential=credential, loop=loop, **kwargs
+        )
+        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop)
+        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
+        self._loop = loop
+
+    @distributed_trace_async
+    async def acquire_lease(self, lease_id=None, **kwargs):
+        # type: (Optional[str], **Any) -> ShareLeaseClient
+        """Requests a new lease.
+
+        If the file does not have an active lease, the File
+        Service creates a lease on the file and returns a new lease.
+
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The File Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A ShareLeaseClient object.
+        :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common.py
+                :start-after: [START acquire_lease_on_blob]
+                :end-before: [END acquire_lease_on_blob]
+                :language: python
+                :dedent: 8
+                :caption: Acquiring a lease on a blob.
+        """
+        kwargs['lease_duration'] = -1
+        lease = ShareLeaseClient(self, lease_id=lease_id)  # type: ignore
+        await lease.acquire(**kwargs)
+        return lease
+
+    @distributed_trace_async
+    async def create_file(  # type: ignore
+        self,
+        size,  # type: int
+        file_attributes="none",  # type: Union[str, NTFSAttributes]
+        file_creation_time="now",  # type: Union[str, datetime]
+        file_last_write_time="now",  # type: Union[str, datetime]
+        file_permission=None,  # type: Optional[str]
+        permission_key=None,  # type: Optional[str]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> Dict[str, Any]
+        """Creates a new file.
+
+        Note that it only initializes the file with no content.
+
+        :param int size: Specifies the maximum size for the file,
+            up to 1 TB.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value would be "None" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            file_attributes value is not case sensitive.
+ :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START create_file] + :end-before: [END create_file] + :language: python + :dedent: 16 + :caption: Create a file. 
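As a usage sketch for the above (file_client is assumed to be an already-connected aio ShareFileClient): create_file only reserves the byte range, and unwritten ranges read back as zeros, so it pairs with later range uploads rather than carrying content itself:

    # Reserve a 1 KiB file; its content is zero-filled until written.
    await file_client.create_file(size=1024)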
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + content_settings = kwargs.pop('content_settings', None) + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + + headers = kwargs.pop("headers", {}) + headers.update(add_metadata_headers(metadata)) + file_http_headers = None + if content_settings: + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition, + ) + file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') + try: + return await self._client.file.create( # type: ignore + file_content_length=size, + metadata=metadata, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + file_http_headers=file_http_headers, + lease_access_conditions=access_conditions, + headers=headers, + timeout=timeout, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_file( + self, data, # type: Any + length=None, # type: Optional[int] + file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="now", # type: Union[str, datetime] + file_last_write_time="now", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Uploads a new file. + + :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
+ :type permission_key: str + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword str encoding: + Defaults to UTF-8. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START upload_file] + :end-before: [END upload_file] + :language: python + :dedent: 16 + :caption: Upload a file. + """ + metadata = kwargs.pop('metadata', None) + content_settings = kwargs.pop('content_settings', None) + max_concurrency = kwargs.pop('max_concurrency', 1) + validate_content = kwargs.pop('validate_content', False) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + + if isinstance(data, six.text_type): + data = data.encode(encoding) + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, "read"): + stream = data + elif hasattr(data, "__iter__"): + stream = IterStreamer(data, encoding=encoding) # type: ignore + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + return await _upload_file_helper( # type: ignore + self, + stream, + length, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + self._config, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + file_permission_key=permission_key, + **kwargs + ) + + @distributed_trace_async + async def start_copy_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Any + """Initiates the copying of data from a source URL into the file + referenced by the client. + + The status of this copy operation can be found using the `get_properties` + method. + + :param str source_url: + Specifies the URL of the source file. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. 
This setting can be + used if Permission size is <= 8KB, otherwise permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword str permission_key: + Key of the permission to be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword file_attributes: + This value can be set to "source" to copy file attributes from the source file to the target file, + or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes + to set on the target file. If this is not set, the default value is "Archive". + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :keyword file_creation_time: + This value can be set to "source" to copy the creation time from the source file to the target file, + or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. + If this is not set, creation time will be set to the date time value of the creation + (or when it was overwritten) of the target file by copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_creation_time: str or ~datetime.datetime + :keyword file_last_write_time: + This value can be set to "source" to copy the last write time from the source file to the target file, or + a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. + If this is not set, value will be the last write time to the file by the copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_last_write_time: str or ~datetime.datetime + :keyword bool ignore_read_only: + Specifies the option to overwrite the target file if it already exists and has read-only attribute set. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword bool set_archive_attribute: + Specifies the option to set the archive attribute on the target file. + True means the archive attribute will be set on the target file despite attribute + overrides or the source file state. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START copy_file_from_url] + :end-before: [END copy_file_from_url] + :language: python + :dedent: 16 + :caption: Copy a file from a URL + """ + metadata = kwargs.pop('metadata', None) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop("headers", {}) + headers.update(add_metadata_headers(metadata)) + kwargs.update(get_smb_properties(kwargs)) + try: + return await self._client.file.start_copy( + source_url, + metadata=metadata, + lease_access_conditions=access_conditions, + headers=headers, + cls=return_response_headers, + timeout=timeout, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, FileProperties], Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination file with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID, or an + instance of FileProperties. + :type copy_id: str or ~azure.storage.fileshare.FileProperties + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + copy_id = copy_id.copy.id + except AttributeError: + try: + copy_id = copy_id["copy_id"] + except TypeError: + pass + try: + await self._client.file.abort_copy(copy_id=copy_id, + lease_access_conditions=access_conditions, + timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def download_file( + self, + offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Iterable[bytes] + """Downloads a file to a stream with automatic chunking. + + :param int offset: + Start of byte range to use for downloading a section of the file. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. 
versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable data generator (stream).
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START download_file]
+                :end-before: [END download_file]
+                :language: python
+                :dedent: 16
+                :caption: Download a file.
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Encryption not supported.")
+        if length is not None and offset is None:
+            raise ValueError("Offset value must not be None if length is set.")
+
+        range_end = None
+        if length is not None:
+            range_end = offset + length - 1  # Service actually uses an end-range inclusive index
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        downloader = StorageStreamDownloader(
+            client=self._client.file,
+            config=self._config,
+            start_range=offset,
+            end_range=range_end,
+            encryption_options=None,
+            name=self.file_name,
+            path='/'.join(self.file_path),
+            share=self.share_name,
+            lease_access_conditions=access_conditions,
+            cls=deserialize_file_stream,
+            **kwargs
+        )
+        await downloader._setup()  # pylint: disable=protected-access
+        return downloader
+
+    @distributed_trace_async
+    async def delete_file(self, **kwargs):
+        # type: (Any) -> None
+        """Marks the specified file for deletion. The file is
+        later deleted during garbage collection.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 16
+                :caption: Delete a file.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_file_properties(self, **kwargs):
+        # type: (Any) -> FileProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+ :returns: FileProperties + :rtype: ~azure.storage.fileshare.FileProperties + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + file_props = await self._client.file.get_properties( + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=deserialize_file_properties, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + file_props.name = self.file_name + file_props.share = self.share_name + file_props.snapshot = self.snapshot + file_props.path = "/".join(self.file_path) + return file_props # type: ignore + + @distributed_trace_async + async def set_http_headers(self, content_settings, # type: ContentSettings + file_attributes="preserve", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the file. + + :param ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). 
+ :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + file_content_length = kwargs.pop("size", None) + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition, + ) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + try: + return await self._client.file.set_http_headers( # type: ignore + file_content_length=file_content_length, + file_http_headers=file_http_headers, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def set_file_metadata(self, metadata=None, **kwargs): # type: ignore + # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] + """Sets user-defined metadata for the specified file as one or more + name-value pairs. + + Each call to this operation replaces all existing metadata + attached to the file. To remove all metadata from the file, + call this operation with no metadata dict. + + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop("headers", {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return await self._client.file.set_metadata( # type: ignore + metadata=metadata, lease_access_conditions=access_conditions, + timeout=timeout, cls=return_response_headers, headers=headers, **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_range( # type: ignore + self, + data, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Upload a range of bytes to a file. + + :param bytes data: + The data to upload. + :param int offset: + Start of byte range to use for uploading a section of the file. + The range can be up to 4 MB in size. + :param int length: + Number of bytes to use for uploading a section of the file. + The range can be up to 4 MB in size. + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. 
This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this MD5 hash is not stored with the + file. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: File-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + """ + validate_content = kwargs.pop('validate_content', False) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + if isinstance(data, six.text_type): + data = data.encode(encoding) + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + try: + return await self._client.file.upload_range( # type: ignore + range=content_range, + content_length=length, + optionalbody=data, + timeout=timeout, + validate_content=validate_content, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_range_from_url(self, source_url, + offset, + length, + source_offset, + **kwargs + ): + # type: (str, int, int, int, **Any) -> Dict[str, Any] + """ + Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. + + :param int offset: + Start of byte range to use for updating a section of the file. + The range can be up to 4 MB in size. + :param int length: + Number of bytes to use for updating a section of the file. + The range can be up to 4 MB in size. + :param str source_url: + A URL of up to 2 KB in length that specifies an Azure file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.file.core.windows.net/myshare/mydir/myfile + https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken + :param int source_offset: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + The service will read the same number of bytes as the destination range (length-offset). + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. 
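+
+        A minimal inline sketch (the source URL, SAS token, and byte offsets
+        below are illustrative assumptions):
+
+        .. code-block:: python
+
+            # Copy the first 512 bytes of a source file into the same range
+            # of this file; the source must be readable, e.g. via a SAS token.
+            await file_client.upload_range_from_url(
+                "https://myaccount.file.core.windows.net/myshare/src-file?<sas_token>",
+                offset=0,
+                length=512,
+                source_offset=0)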
+ """ + options = self._upload_range_from_url_options( + source_url=source_url, + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return await self._client.file.upload_range_from_url(**options) # type: ignore + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def get_ranges( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List[Dict[str, int]] + """Returns the list of valid page ranges for a file or snapshot + of a file. + + :param int offset: + Specifies the start offset of bytes over which to get ranges. + :param int length: + Number of bytes to use over which to get ranges. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + A list of valid ranges. + :rtype: List[dict[str, int]] + """ + options = self._get_ranges_options( + offset=offset, + length=length, + **kwargs) + try: + ranges = await self._client.file.get_range_list(**options) + except StorageErrorException as error: + process_storage_error(error) + return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] + + @distributed_trace_async + async def get_ranges_diff( # type: ignore + self, + previous_sharesnapshot, # type: Union[str, Dict[str, Any]] + offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + """Returns the list of valid page ranges for a file or snapshot + of a file. + + .. versionadded:: 12.6.0 + + :param int offset: + Specifies the start offset of bytes over which to get ranges. + :param int length: + Number of bytes to use over which to get ranges. + :param str previous_sharesnapshot: + The snapshot diff parameter that contains an opaque DateTime value that + specifies a previous file snapshot to be compared + against a more recent snapshot or the current file. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. + The first element are filled file ranges, the 2nd element is cleared file ranges. + :rtype: tuple(list(dict(str, str), list(dict(str, str)) + """ + options = self._get_ranges_options( + offset=offset, + length=length, + previous_sharesnapshot=previous_sharesnapshot, + **kwargs) + try: + ranges = await self._client.file.get_range_list(**options) + except StorageErrorException as error: + process_storage_error(error) + return get_file_ranges_result(ranges) + + @distributed_trace_async + async def clear_range( # type: ignore + self, + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Clears the specified range and releases the space used in storage for + that range. + + :param int offset: + Start of byte range to use for clearing a section of the file. + The range can be up to 4 MB in size. 
+ :param int length: + Number of bytes to use for clearing a section of the file. + The range can be up to 4 MB in size. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Unsupported method for encryption.") + + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns with 512 bytes file size") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns with 512 bytes file size") + end_range = length + offset - 1 # Reformat to an inclusive range index + content_range = "bytes={0}-{1}".format(offset, end_range) + try: + return await self._client.file.upload_range( # type: ignore + timeout=timeout, + cls=return_response_headers, + content_length=0, + file_range_write="clear", + range=content_range, + lease_access_conditions=access_conditions, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def resize_file(self, size, **kwargs): + # type: (int, Any) -> Dict[str, Any] + """Resizes a file to the specified size. + + :param int size: + Size to resize file to (in bytes) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + return await self._client.file.set_http_headers( # type: ignore + file_content_length=size, + file_attributes="preserve", + file_creation_time="preserve", + file_last_write_time="preserve", + file_permission="preserve", + lease_access_conditions=access_conditions, + cls=return_response_headers, + timeout=timeout, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_handles(self, **kwargs): + # type: (Any) -> AsyncItemPaged + """Lists handles for file. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of HandleItem + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop("results_per_page", None) + command = functools.partial( + self._client.file.list_handles, + sharesnapshot=self.snapshot, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=HandlesPaged) + + @distributed_trace_async + async def close_handle(self, handle, **kwargs): + # type: (Union[str, HandleItem], Any) -> Dict[str, int] + """Close an open file handle. + + :param handle: + A specific handle to close. 
+ :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = await self._client.file.force_close_handles( + handle_id, + marker=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def close_all_handles(self, **kwargs): + # type: (Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = await self._client.file.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except StorageErrorException as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_lease_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_lease_async.py new file mode 100644 index 0000000..0f6fdb3 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_lease_async.py @@ -0,0 +1,229 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
+    TypeVar, TYPE_CHECKING
+)
+
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from .._shared.response_handlers import return_response_headers, process_storage_error
+from .._generated.models import (
+    StorageErrorException)
+from .._generated.aio.operations_async import FileOperations, ShareOperations
+from .._lease import ShareLeaseClient as LeaseClientBase
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    ShareFileClient = TypeVar("ShareFileClient")
+    ShareClient = TypeVar("ShareClient")
+
+
+class ShareLeaseClient(LeaseClientBase):
+    """Creates a new ShareLeaseClient.
+
+    This client provides lease operations on a ShareClient or ShareFileClient.
+
+    :ivar str id:
+        The ID of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired.
+    :ivar str etag:
+        The ETag of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired or modified.
+    :ivar ~datetime.datetime last_modified:
+        The last modified timestamp of the lease currently being maintained.
+        This will be `None` if no lease has yet been acquired or modified.
+
+    :param client:
+        The client of the file or share to lease.
+    :type client: ~azure.storage.fileshare.ShareFileClient or
+        ~azure.storage.fileshare.ShareClient
+    :param str lease_id:
+        A string representing the lease ID of an existing lease. This value does not
+        need to be specified in order to acquire a new lease, or break one.
+    """
+
+    def __enter__(self):
+        raise TypeError("Async lease must use 'async with'.")
+
+    def __exit__(self, *args):
+        self.release()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *args):
+        await self.release()
+
+    @distributed_trace_async
+    async def acquire(self, **kwargs):
+        # type: (**Any) -> None
+        """Requests a new lease. This operation establishes and manages a lock on a
+        file or share for write and delete operations. If the file or share does not
+        have an active lease, the File or Share service creates a lease on the file
+        or share and returns a new lease ID. If the file has an active lease, you can
+        only request a new lease using the active lease ID.
+
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be
+            between 15 and 60 seconds. A share lease duration cannot be changed
+            using renew or change. Default is -1 (infinite share lease).
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+ :rtype: None + """ + try: + lease_duration = kwargs.pop('lease_duration', -1) + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = await self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace_async + async def renew(self, **kwargs): + # type: (Any) -> None + """Renews the share lease. + + The share lease can be renewed if the lease ID specified in the + lease client matches that associated with the share. Note that + the lease may be renewed even if it has expired as long as the share + has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + .. versionadded:: 12.6.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + if isinstance(self._client, FileOperations): + raise TypeError("Lease renewal operations are only valid for ShareClient.") + try: + response = await self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + sharesnapshot=self._snapshot, + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def release(self, **kwargs): + # type: (Any) -> None + """Releases the lease. The lease may be released if the lease ID specified on the request matches + that associated with the share or file. Releasing the lease allows another client to immediately acquire + the lease for the share or file as soon as the release is complete. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + try: + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = await self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and + a new lease ID in x-ms-proposed-lease-id. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The File or Share service raises an error + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :return: None + """ + try: + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = await self._client.change_lease( + lease_id=self.id, + proposed_lease_id=proposed_lease_id, + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def break_lease(self, **kwargs): + # type: (Any) -> int + """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. An infinite lease breaks immediately. + + Once a lease is broken, it cannot be changed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :keyword int lease_break_period: + This is the proposed duration of seconds that the share lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the share lease. If longer, the time remaining on the share lease is used. + A new share lease will not be available before the break period has + expired, but the share lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration share lease breaks after the remaining share lease + period elapses, and an infinite share lease breaks immediately. + + .. versionadded:: 12.6.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + try: + lease_break_period = kwargs.pop('lease_break_period', None) + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + if isinstance(self._client, ShareOperations): + kwargs['break_period'] = lease_break_period + if isinstance(self._client, FileOperations) and lease_break_period: + raise TypeError("Setting a lease break period is only applicable to Share leases.") + + response = await self._client.break_lease( + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_models.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_models.py new file mode 100644 index 0000000..affee8f --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_models.py @@ -0,0 +1,178 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+
+from azure.core.async_paging import AsyncPageIterator
+
+from .._shared.response_handlers import return_context_and_deserialized, process_storage_error
+from .._generated.models import StorageErrorException
+from .._generated.models import DirectoryItem
+from .._models import Handle, ShareProperties
+
+
+def _wrap_item(item):
+    if isinstance(item, DirectoryItem):
+        return {'name': item.name, 'is_directory': True}
+    return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
+
+
+class SharePropertiesPaged(AsyncPageIterator):
+    """An iterable of Share properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.fileshare.ShareProperties)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only shares whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of share names to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
+        super(SharePropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class HandlesPaged(AsyncPageIterator):
+    """An iterable of Handles.
+
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.fileshare.Handle)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param int results_per_page: The maximum number of handles to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(self, command, results_per_page=None, continuation_token=None):
+        super(HandlesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except StorageErrorException as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.current_page = [Handle._from_generated(h) for h in self._response.handle_list]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class DirectoryPropertiesPaged(AsyncPageIterator):
+    """An iterable for the contents of a directory.
+
+    This iterable will yield dicts for the contents of the directory. The dicts
+    will have the keys 'name' (str) and 'is_directory' (bool).
+    Items that are files (is_directory=False) will have an additional 'size' key.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(dict(str, Any))
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only directories whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of directory and file entries to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
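+
+    A consumption sketch; instances of this pager are returned by directory
+    listing calls (assumed here to be ShareDirectoryClient.list_directories_and_files)
+    rather than constructed directly:
+
+    .. code-block:: python
+
+        async for item in directory_client.list_directories_and_files():
+            # Each item is a dict; file entries additionally carry a 'size' key.
+            print(item['name'], item['is_directory'])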
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(DirectoryPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + prefix=self.prefix, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except StorageErrorException as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] + self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) + return self._response.next_marker or None, self.current_page diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_client_async.py new file mode 100644 index 0000000..e4f9b65 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_client_async.py @@ -0,0 +1,664 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +from typing import ( # pylint: disable=unused-import + Optional, Union, Dict, Any, Iterable, TYPE_CHECKING +) + +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.pipeline import AsyncPipeline +from .._shared.policies_async import ExponentialRetry +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.request_handlers import add_metadata_headers, serialize_iso +from .._shared.response_handlers import ( + return_response_headers, + process_storage_error, + return_headers_and_deserialized) +from .._generated.aio import AzureFileStorage +from .._generated.version import VERSION +from .._generated.models import ( + StorageErrorException, + SignedIdentifier, + DeleteSnapshotsOptionType) +from .._deserialize import deserialize_share_properties, deserialize_permission +from .._serialize import get_api_version, get_access_conditions +from .._share_client import ShareClient as ShareClientBase +from ._directory_client_async import ShareDirectoryClient +from ._file_client_async import ShareFileClient +from ..aio._lease_async import ShareLeaseClient + + +if TYPE_CHECKING: + from .._models import ShareProperties, AccessPolicy + + +class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase): + """A client to interact with a specific share, although that share may not yet exist. 
+ + For operations relating to a specific directory or file in this share, the clients for + those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the share, + use the :func:`from_share_url` classmethod. + :param share_name: + The name of the share with which to interact. + :type share_name: str + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword loop: + The event loop to run the asynchronous tasks. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + loop = kwargs.pop('loop', None) + super(ShareClient, self).__init__( + account_url, + share_name=share_name, + snapshot=snapshot, + credential=credential, + loop=loop, + **kwargs) + self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) + self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access + self._loop = loop + + def get_directory_client(self, directory_path=None): + # type: (Optional[str]) -> ShareDirectoryClient + """Get a client to interact with the specified directory. + The directory need not already exist. + + :param str directory_path: + Path to the specified directory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + """ + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) + + def get_file_client(self, file_path): + # type: (str) -> ShareFileClient + """Get a client to interact with the specified file. + The file need not already exist. + + :param str file_path: + Path to the specified file. + :returns: A File Client. 
+ :rtype: ~azure.storage.fileshare.aio.ShareFileClient + """ + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareFileClient( + self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) + + @distributed_trace_async() + async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): + # type: (int, Optional[str], **Any) -> ShareLeaseClient + """Requests a new lease. + + If the share does not have an active lease, the Share + Service creates a lease on the share and returns a new lease. + + .. versionadded:: 12.6.0 + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Share Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A ShareLeaseClient object. + :rtype: ~azure.storage.fileshare.ShareLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START acquire_lease_on_share] + :end-before: [END acquire_lease_on_share] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a share. + """ + kwargs['lease_duration'] = lease_duration + lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore + await lease.acquire(**kwargs) + return lease + + @distributed_trace_async + async def create_share(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new Share under the account. If a share with the + same name already exists, the operation fails. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int quota: + The quota to be allotted. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START create_share] + :end-before: [END create_share] + :language: python + :dedent: 12 + :caption: Creates a file share. + """ + metadata = kwargs.pop('metadata', None) + quota = kwargs.pop('quota', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + + try: + return await self._client.share.create( # type: ignore + timeout=timeout, + metadata=metadata, + quota=quota, + cls=return_response_headers, + headers=headers, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def create_snapshot( # type: ignore + self, + **kwargs # type: Optional[Any] + ): + # type: (...) -> Dict[str, Any] + """Creates a snapshot of the share. + + A snapshot is a read-only version of a share that's taken at a point in time. + It can be read, copied, or deleted, but not modified. 
Snapshots provide a way
+ to back up a share as it appears at a moment in time.
+
+ A snapshot of a share has the same name as the base share from which the snapshot
+ is taken, with a DateTime value appended to indicate the time at which the
+ snapshot was taken.
+
+ :keyword dict(str,str) metadata:
+ Name-value pairs associated with the share as metadata.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Share-updated property dict (Snapshot ID, Etag, and last modified).
+ :rtype: dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/file_samples_share_async.py
+ :start-after: [START create_share_snapshot]
+ :end-before: [END create_share_snapshot]
+ :language: python
+ :dedent: 16
+ :caption: Creates a snapshot of the file share.
+ """
+ metadata = kwargs.pop('metadata', None)
+ timeout = kwargs.pop('timeout', None)
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata)) # type: ignore
+ try:
+ return await self._client.share.create_snapshot( # type: ignore
+ timeout=timeout,
+ cls=return_response_headers,
+ headers=headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def delete_share(
+ self, delete_snapshots=False, # type: Optional[bool]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Marks the specified share for deletion. The share is
+ later deleted during garbage collection.
+
+ :param bool delete_snapshots:
+ Indicates if snapshots are to be deleted.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword lease:
+ Required if the share has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.6.0
+ This keyword argument was introduced in API version '2020-02-02'.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/file_samples_share_async.py
+ :start-after: [START delete_share]
+ :end-before: [END delete_share]
+ :language: python
+ :dedent: 16
+ :caption: Deletes the share and any snapshots.
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ timeout = kwargs.pop('timeout', None)
+ delete_include = None
+ if delete_snapshots:
+ delete_include = DeleteSnapshotsOptionType.include
+ try:
+ await self._client.share.delete(
+ timeout=timeout,
+ sharesnapshot=self.snapshot,
+ delete_snapshots=delete_include,
+ lease_access_conditions=access_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_share_properties(self, **kwargs):
+ # type: (Any) -> ShareProperties
+ """Returns all user-defined metadata and system properties for the
+ specified share. The data returned does not include the share's
+ list of files or directories.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword lease:
+ Required if the share has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.6.0
+ This keyword argument was introduced in API version '2020-02-02'.
+
+ :returns: The share properties.
+ :rtype: ~azure.storage.fileshare.ShareProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/file_samples_hello_world_async.py
+ :start-after: [START get_share_properties]
+ :end-before: [END get_share_properties]
+ :language: python
+ :dedent: 16
+ :caption: Gets the share properties.
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + props = await self._client.share.get_properties( + timeout=timeout, + sharesnapshot=self.snapshot, + cls=deserialize_share_properties, + lease_access_conditions=access_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + props.name = self.share_name + props.snapshot = self.snapshot + return props # type: ignore + + @distributed_trace_async + async def set_share_quota(self, quota, **kwargs): + # type: (int, Any) -> Dict[str, Any] + """Sets the quota for the share. + + :param int quota: + Specifies the maximum size of the share, in gigabytes. + Must be greater than 0, and less than or equal to 5TB. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-02-02'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START set_share_quota] + :end-before: [END set_share_quota] + :language: python + :dedent: 16 + :caption: Sets the share quota. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + return await self._client.share.set_quota( # type: ignore + timeout=timeout, + quota=quota, + cls=return_response_headers, + lease_access_conditions=access_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def set_share_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the share. + + Each call to this operation replaces all existing metadata + attached to the share. To remove all metadata from the share, + call this operation with no metadata dict. + + :param metadata: + Name-value pairs associated with the share as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-02-02'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START set_share_metadata] + :end-before: [END set_share_metadata] + :language: python + :dedent: 16 + :caption: Sets the share metadata. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return await self._client.share.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + lease_access_conditions=access_conditions, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def get_share_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the share. 
The permissions
+ indicate whether files in a share may be accessed publicly.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword lease:
+ Required if the share has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.6.0
+ This keyword argument was introduced in API version '2020-02-02'.
+
+ :returns: Access policy information in a dict.
+ :rtype: dict[str, Any]
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ timeout = kwargs.pop('timeout', None)
+ try:
+ response, identifiers = await self._client.share.get_access_policy(
+ timeout=timeout,
+ cls=return_headers_and_deserialized,
+ lease_access_conditions=access_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return {
+ 'public_access': response.get('share_public_access'),
+ 'signed_identifiers': identifiers or []
+ }
+
+ @distributed_trace_async
+ async def set_share_access_policy(self, signed_identifiers, **kwargs):
+ # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str]
+ """Sets the permissions for the share, or stored access
+ policies that may be used with Shared Access Signatures. The permissions
+ indicate whether files in a share may be accessed publicly.
+
+ :param signed_identifiers:
+ A dictionary of access policies to associate with the share. The
+ dictionary may contain up to 5 elements. An empty dictionary
+ will clear the access policies set on the share.
+ :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`)
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword lease:
+ Required if the share has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.6.0
+ This keyword argument was introduced in API version '2020-02-02'.
+
+ :returns: Share-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ timeout = kwargs.pop('timeout', None)
+ if len(signed_identifiers) > 5:
+ raise ValueError(
+ 'Too many access policies provided. The server does not support setting '
+ 'more than 5 access policies on a single resource.')
+ identifiers = []
+ for key, value in signed_identifiers.items():
+ if value:
+ value.start = serialize_iso(value.start)
+ value.expiry = serialize_iso(value.expiry)
+ identifiers.append(SignedIdentifier(id=key, access_policy=value))
+ signed_identifiers = identifiers # type: ignore
+
+ try:
+ return await self._client.share.set_access_policy( # type: ignore
+ share_acl=signed_identifiers or None,
+ timeout=timeout,
+ cls=return_response_headers,
+ lease_access_conditions=access_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_share_stats(self, **kwargs):
+ # type: (Any) -> int
+ """Gets the approximate size of the data stored on the share in bytes.
+
+ Note that this value may not include all recently created
+ or recently resized files.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword lease:
+ Required if the share has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.6.0
+ This keyword argument was introduced in API version '2020-02-02'.
+
+ :return: The approximate size of the data (in bytes) stored on the share.
+ :rtype: int
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ timeout = kwargs.pop('timeout', None)
+ try:
+ stats = await self._client.share.get_statistics(
+ timeout=timeout,
+ lease_access_conditions=access_conditions,
+ **kwargs)
+ return stats.share_usage_bytes # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def list_directories_and_files( # type: ignore
+ self, directory_name=None, # type: Optional[str]
+ name_starts_with=None, # type: Optional[str]
+ marker=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Iterable[Dict[str,str]]
+ """Lists the directories and files under the share.
+
+ :param str directory_name:
+ Name of a directory.
+ :param str name_starts_with:
+ Filters the results to return only directories whose names
+ begin with the specified prefix.
+ :param str marker:
+ An opaque continuation token. This value can be retrieved from the
+ next_marker field of a previous generator object. If specified,
+ this generator will begin returning results from this point.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/file_samples_share_async.py
+ :start-after: [START share_list_files_in_dir]
+ :end-before: [END share_list_files_in_dir]
+ :language: python
+ :dedent: 16
+ :caption: List directories and files in the share.
+ """
+ timeout = kwargs.pop('timeout', None)
+ directory = self.get_directory_client(directory_name)
+ return directory.list_directories_and_files(
+ name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs)
+
+ @distributed_trace_async
+ async def create_permission_for_share(self, file_permission, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> str
+ """Create a permission (a security descriptor) at the share level.
+
+ This 'permission' can be used for the files/directories in the share.
+ If a matching permission already exists, its key is returned; otherwise,
+ a new permission is created at the share level and its key is returned.
+
+ :param str file_permission:
+ File permission, a portable SDDL.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A file permission key.
+ :rtype: str
+ """
+ timeout = kwargs.pop('timeout', None)
+ options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs)
+ try:
+ return await self._client.share.create_permission(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_permission_for_share( # type: ignore
+ self, permission_key, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> str
+ """Get a permission (a security descriptor) for a given key.
+
+ This 'permission' can be used for the files/directories in the share.
+
+ :param str permission_key:
+ Key of the file permission to retrieve.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A file permission (a portable SDDL) + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + try: + return await self._client.share.get_permission( # type: ignore + file_permission_key=permission_key, + cls=deserialize_permission, + timeout=timeout, + **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace_async + async def create_directory(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Creates a directory in the share and returns a client to interact + with the directory. + + :param str directory_name: + The name of the directory. + :keyword dict(str,str) metadata: + Name-value pairs associated with the directory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + """ + directory = self.get_directory_client(directory_name) + kwargs.setdefault('merge_span', True) + await directory.create_directory(**kwargs) + return directory # type: ignore + + @distributed_trace_async + async def delete_directory(self, directory_name, **kwargs): + # type: (str, Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :param str directory_name: + The name of the directory. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + directory = self.get_directory_client(directory_name) + await directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_service_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_service_client_async.py new file mode 100644 index 0000000..af67dcd --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_service_client_async.py @@ -0,0 +1,368 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, + TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import AsyncPipeline +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.response_handlers import process_storage_error +from .._shared.policies_async import ExponentialRetry +from .._generated.aio import AzureFileStorage +from .._generated.models import StorageErrorException, StorageServiceProperties +from .._generated.version import VERSION +from .._share_service_client import ShareServiceClient as ShareServiceClientBase +from .._serialize import get_api_version +from ._share_client_async import ShareClient +from ._models import SharePropertiesPaged +from .._models import service_properties_deserialize + +if TYPE_CHECKING: + from datetime import datetime + from .._shared.models import ResourceTypes, AccountSasPermissions + from .._models import ( + ShareProperties, + Metrics, + CorsRule, + ShareProtocolSettings, + ) + + +class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase): + """A client to interact with the File Share Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete shares within the account. + For operations relating to a specific share, a client for that entity + can also be retrieved using the :func:`get_share_client` function. + + :param str account_url: + The URL to the file share storage account. Any other entities included + in the URL path (e.g. share or file) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword loop: + The event loop to run the asynchronous tasks. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication_async.py + :start-after: [START create_share_service_client] + :end-before: [END create_share_service_client] + :language: python + :dedent: 8 + :caption: Create the share service client with url and credential. + """ + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None
+ kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+ loop = kwargs.pop('loop', None)
+ super(ShareServiceClient, self).__init__(
+ account_url,
+ credential=credential,
+ loop=loop,
+ **kwargs)
+ self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop)
+ self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access
+ self._loop = loop
+
+ @distributed_trace_async
+ async def get_service_properties(self, **kwargs):
+ # type: (Any) -> Dict[str, Any]
+ """Gets the properties of a storage account's File Share service, including
+ Azure Storage Analytics.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A dictionary containing file service properties such as
+ analytics logging, hour/minute metrics, cors rules, etc.
+ :rtype: Dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/file_samples_service_async.py
+ :start-after: [START get_service_properties]
+ :end-before: [END get_service_properties]
+ :language: python
+ :dedent: 12
+ :caption: Get file share service properties.
+ """
+ timeout = kwargs.pop('timeout', None)
+ try:
+ service_props = await self._client.service.get_properties(timeout=timeout, **kwargs)
+ return service_properties_deserialize(service_props)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_service_properties(
+ self, hour_metrics=None, # type: Optional[Metrics]
+ minute_metrics=None, # type: Optional[Metrics]
+ cors=None, # type: Optional[List[CorsRule]]
+ protocol=None, # type: Optional[ShareProtocolSettings]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Sets the properties of a storage account's File Share service, including
+ Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the
+ existing settings on the service for that functionality are preserved.
+
+ :param hour_metrics:
+ The hour metrics settings provide a summary of request
+ statistics grouped by API in hourly aggregates for files.
+ :type hour_metrics: ~azure.storage.fileshare.Metrics
+ :param minute_metrics:
+ The minute metrics settings provide request statistics
+ for each minute for files.
+ :type minute_metrics: ~azure.storage.fileshare.Metrics
+ :param cors:
+ You can include up to five CorsRule elements in the
+ list. If an empty list is specified, all CORS rules will be deleted,
+ and CORS will be disabled for the service.
+ :type cors: list(:class:`~azure.storage.fileshare.CorsRule`)
+ :param protocol:
+ Sets protocol settings.
+ :type protocol: ~azure.storage.fileshare.ShareProtocolSettings
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/file_samples_service_async.py
+ :start-after: [START set_service_properties]
+ :end-before: [END set_service_properties]
+ :language: python
+ :dedent: 8
+ :caption: Sets file share service properties.
+ """ + timeout = kwargs.pop('timeout', None) + props = StorageServiceProperties( + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors, + protocol=protocol + ) + try: + await self._client.service.set_properties(props, timeout=timeout, **kwargs) + except StorageErrorException as error: + process_storage_error(error) + + @distributed_trace + def list_shares( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + include_snapshots=False, # type: Optional[bool] + **kwargs # type: Any + ): # type: (...) -> AsyncItemPaged + """Returns auto-paging iterable of dict-like ShareProperties under the specified account. + The generator will lazily follow the continuation tokens returned by + the service and stop when all shares have been returned. + + :param str name_starts_with: + Filters the results to return only shares whose names + begin with the specified name_starts_with. + :param bool include_metadata: + Specifies that share metadata be returned in the response. + :param bool include_snapshots: + Specifies that share snapshot be returned in the response. + :keyword bool include_deleted: + Specifies that deleted shares be returned in the response. + This is only for share soft delete enabled account. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) of ShareProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START fsc_list_shares] + :end-before: [END fsc_list_shares] + :language: python + :dedent: 16 + :caption: List shares in the file share service. + """ + timeout = kwargs.pop('timeout', None) + include = [] + if include_metadata: + include.append('metadata') + if include_snapshots: + include.append('snapshots') + include_deleted = kwargs.pop('include_deleted', None) + if include_deleted: + include.append("deleted") + + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.service.list_shares_segment, + include=include, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=SharePropertiesPaged) + + @distributed_trace_async + async def create_share( + self, share_name, # type: str + **kwargs + ): + # type: (...) -> ShareClient + """Creates a new share under the specified account. If the share + with the same name already exists, the operation fails. Returns a client with + which to interact with the newly created share. + + :param str share_name: The name of the share to create. + :keyword dict(str,str) metadata: + A dict with name_value pairs to associate with the + share as metadata. Example:{'Category':'test'} + :keyword int quota: + Quota in bytes. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.fileshare.aio.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START fsc_create_shares] + :end-before: [END fsc_create_shares] + :language: python + :dedent: 12 + :caption: Create a share in the file share service. 
+ """ + metadata = kwargs.pop('metadata', None) + quota = kwargs.pop('quota', None) + timeout = kwargs.pop('timeout', None) + share = self.get_share_client(share_name) + kwargs.setdefault('merge_span', True) + await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) + return share + + @distributed_trace_async + async def delete_share( + self, share_name, # type: Union[ShareProperties, str] + delete_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> None + """Marks the specified share for deletion. The share is + later deleted during garbage collection. + + :param share_name: + The share to delete. This can either be the name of the share, + or an instance of ShareProperties. + :type share_name: str or ~azure.storage.fileshare.ShareProperties + :param bool delete_snapshots: + Indicates if snapshots are to be deleted. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START fsc_delete_shares] + :end-before: [END fsc_delete_shares] + :language: python + :dedent: 16 + :caption: Delete a share in the file share service. + """ + timeout = kwargs.pop('timeout', None) + share = self.get_share_client(share_name) + kwargs.setdefault('merge_span', True) + await share.delete_share( + delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) + + @distributed_trace_async + async def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): + # type: (str, str, **Any) -> ShareClient + """Restores soft-deleted share. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.2.0 + This operation was introduced in API version '2019-12-12'. + + :param str deleted_share_name: + Specifies the name of the deleted share to restore. + :param str deleted_share_version: + Specifies the version of the deleted share to restore. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.fileshare.aio.ShareClient + """ + share = self.get_share_client(deleted_share_name) + try: + await share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access + deleted_share_version=deleted_share_version, + timeout=kwargs.pop('timeout', None), **kwargs) + return share + except StorageErrorException as error: + process_storage_error(error) + + def get_share_client(self, share, snapshot=None): + # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient + """Get a client to interact with the specified share. + The share need not already exist. + + :param share: + The share. This can either be the name of the share, + or an instance of ShareProperties. + :type share: str or ~azure.storage.fileshare.ShareProperties + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :returns: A ShareClient. + :rtype: ~azure.storage.fileshare.aio.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START get_share_client] + :end-before: [END get_share_client] + :language: python + :dedent: 8 + :caption: Gets the share client. 
+ """ + try: + share_name = share.name + except AttributeError: + share_name = share + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareClient( + self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, + api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_models.py b/azure/multiapi/storagev2/queue/v2018_03_28/_models.py index 8fc8a30..24d3b4b 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_models.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_models.py @@ -409,7 +409,7 @@ def from_string(cls, permission): p_process = 'p' in permission parsed = cls(p_read, p_add, p_update, p_process) - parsed._str = permission # pylint: disable = protected-access + return parsed diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_queue_client.py b/azure/multiapi/storagev2/queue/v2018_03_28/_queue_client.py index 6190a2b..f8e6263 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_queue_client.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_queue_client.py @@ -158,7 +158,7 @@ def from_connection_string( credential=None, # type: Any **kwargs # type: Any ): - # type: (...) -> None + # type: (...) -> QueueClient """Create QueueClient from a Connection String. :param str conn_str: diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/authentication.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/authentication.py index a8db96d..d04c1e4 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/authentication.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/authentication.py @@ -64,27 +64,32 @@ def __init__(self, account_name, account_key): self.account_key = account_key super(SharedKeyCredentialPolicy, self).__init__() - def _get_headers(self, request, headers_to_sign): + @staticmethod + def _get_headers(request, headers_to_sign): headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) if 'content-length' in headers and headers['content-length'] == '0': del headers['content-length'] return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - def _get_verb(self, request): + @staticmethod + def _get_verb(request): return request.http_request.method + '\n' def _get_canonicalized_resource(self, request): uri_path = urlparse(request.http_request.url).path try: if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport): + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ + isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), + AioHttpTransport): uri_path = URL(uri_path) return '/' + self.account_name + str(uri_path) except TypeError: pass return '/' + self.account_name + uri_path - def _get_canonicalized_headers(self, request): + @staticmethod + def _get_canonicalized_headers(request): string_to_sign = '' x_ms_headers = [] for name, value in request.http_request.headers.items(): @@ -96,8 +101,9 @@ def _get_canonicalized_headers(self, request): string_to_sign += ''.join([name, ':', value, '\n']) return string_to_sign - def _get_canonicalized_resource_query(self, request): - 
sorted_queries = [(name, value) for name, value in request.http_request.query.items()]
+ @staticmethod
+ def _get_canonicalized_resource_query(request):
+ sorted_queries = list(request.http_request.query.items())
+ sorted_queries.sort()
 string_to_sign = ''
diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py
index b81ced7..fd3c32e 100644
--- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py
+++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py
@@ -84,12 +84,17 @@ def __init__(
 raise ValueError("Invalid service: {}".format(service))
 service_name = service.split('-')[0]
 account = parsed_url.netloc.split(".{}.core.".format(service_name))
+ self.account_name = account[0] if len(account) > 1 else None
- secondary_hostname = None
+ if not self.account_name and (parsed_url.netloc.startswith("localhost")
+ or parsed_url.netloc.startswith("127.0.0.1")):
+ self.account_name = parsed_url.path.strip("/")
- self.credential = format_shared_key_credential(account, credential)
+ self.credential = _format_shared_key_credential(self.account_name, credential)
 if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
 raise ValueError("Token credential is only supported with HTTPS.")
+
+ secondary_hostname = None
 if hasattr(self.credential, "account_name"):
 self.account_name = self.credential.account_name
 secondary_hostname = "{}-secondary.{}.{}".format(
@@ -246,6 +251,8 @@ def _create_pipeline(self, credential, **kwargs):
 DistributedTracingPolicy(**kwargs),
 HttpLoggingPolicy(**kwargs)
 ]
+ if kwargs.get("_additional_pipeline_policies"):
+ policies = policies + kwargs.get("_additional_pipeline_policies")
 return config, Pipeline(config.transport, policies=policies)
 def _batch_send(
@@ -318,11 +325,11 @@ def __exit__(self, *args): # pylint: disable=arguments-differ
 pass
-def format_shared_key_credential(account, credential):
+def _format_shared_key_credential(account_name, credential):
 if isinstance(credential, six.string_types):
- if len(account) < 2:
+ if not account_name:
 raise ValueError("Unable to determine account name for shared key credential.")
- credential = {"account_name": account[0], "account_key": credential}
+ credential = {"account_name": account_name, "account_key": credential}
 if isinstance(credential, dict):
 if "account_name" not in credential:
 raise ValueError("Shared key credential missing 'account_name'")
diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py
index 1772251..d252ad0 100644
--- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py
+++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py
@@ -102,6 +102,8 @@ def _create_pipeline(self, credential, **kwargs):
 DistributedTracingPolicy(**kwargs),
 HttpLoggingPolicy(**kwargs),
 ]
+ if kwargs.get("_additional_pipeline_policies"):
+ policies = policies + kwargs.get("_additional_pipeline_policies")
 return config, AsyncPipeline(config.transport, policies=policies)
 async def _batch_send(
diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/models.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/models.py
index 0da6b8d..4a8d4a3 100644
--- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/models.py
+++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/models.py
@@ -3,6 +3,7 @@
# Licensed under the MIT License.
See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
+# pylint: disable=too-many-instance-attributes
 from enum import Enum
@@ -310,6 +311,8 @@ class AccountSasPermissions(object):
 Permits write permissions to the specified resource type.
 :param bool delete:
 Valid for Container and Object resource types, except for queue messages.
+ :param bool delete_previous_version:
+ Deletes the previous blob version on a versioning-enabled storage account.
 :param bool list:
 Valid for Service and Container resource types only.
 :param bool add:
@@ -322,25 +325,37 @@
 Valid for the following Object resource types only: queue messages.
 :param bool process:
 Valid for the following Object resource type only: queue messages.
+ :keyword bool tag:
+ Enables setting or getting tags on the blobs in the container.
+ :keyword bool filter_by_tags:
+ Enables getting blobs by tags; this should be used together with the list permission.
 """
- def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin
- add=False, create=False, update=False, process=False):
+ def __init__(self, read=False, write=False, delete=False,
+ list=False, # pylint: disable=redefined-builtin
+ add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs):
 self.read = read
 self.write = write
 self.delete = delete
+ self.delete_previous_version = delete_previous_version
 self.list = list
 self.add = add
 self.create = create
 self.update = update
 self.process = process
+ self.tag = kwargs.pop('tag', False)
+ self.filter_by_tags = kwargs.pop('filter_by_tags', False)
 self._str = (('r' if self.read else '') +
- ('w' if self.write else '') +
+ ('w' if self.write else '') +
 ('d' if self.delete else '') +
+ ('x' if self.delete_previous_version else '') +
 ('l' if self.list else '') +
 ('a' if self.add else '') +
 ('c' if self.create else '') +
 ('u' if self.update else '') +
- ('p' if self.process else ''))
+ ('p' if self.process else '') +
+ ('f' if self.filter_by_tags else '') +
+ ('t' if self.tag else '')
+ )
 def __str__(self):
 return self._str
@@ -355,22 +370,27 @@ def from_string(cls, permission):
 :param str permission: Specify permissions in the string with the first letter of the word.
- :return: A AccountSasPermissions object
+ :return: An AccountSasPermissions object
 :rtype: ~azure.storage.queue.AccountSasPermissions
 """
 p_read = 'r' in permission
 p_write = 'w' in permission
 p_delete = 'd' in permission
+ p_delete_previous_version = 'x' in permission
 p_list = 'l' in permission
 p_add = 'a' in permission
 p_create = 'c' in permission
 p_update = 'u' in permission
 p_process = 'p' in permission
+ p_tag = 't' in permission
+ p_filter_by_tags = 'f' in permission
+ parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version,
+ list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag,
+ filter_by_tags=p_filter_by_tags)
- parsed = cls(p_read, p_write, p_delete, p_list, p_add, p_create, p_update, p_process)
- parsed._str = permission # pylint: disable = protected-access
 return parsed
+
 class Services(object):
 """Specifies the services accessible with the account SAS.
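
The new AccountSasPermissions flags added above map to the single-letter codes 'x' (delete_previous_version), 't' (tag), and 'f' (filter_by_tags), and round-trip through from_string. A minimal sketch of the expected behavior; the import path follows the vendored module layout in this diff, and the class may also be re-exported from the package root:

    from azure.multiapi.storagev2.queue.v2018_03_28._shared.models import AccountSasPermissions

    # Compose a permission set that includes the newly added flags.
    perms = AccountSasPermissions(read=True, write=True, delete_previous_version=True,
                                  tag=True, filter_by_tags=True)
    print(str(perms))  # 'rwxft' -- ordering follows the _str concatenation above

    # from_string recognizes the same single-letter codes, so the string form round-trips.
    parsed = AccountSasPermissions.from_string('rwxft')
    assert parsed.delete_previous_version and parsed.tag and parsed.filter_by_tags
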
diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies.py index 2ba9ea4..c9bc798 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies.py @@ -322,7 +322,7 @@ def __init__(self, **kwargs): # pylint: disable=unused-argument @staticmethod def get_content_md5(data): - md5 = hashlib.md5() #nosec + md5 = hashlib.md5() # nosec if isinstance(data, bytes): md5.update(data) elif hasattr(data, 'read'): diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies_async.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies_async.py index c0a4476..e0926b8 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies_async.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/policies_async.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method import asyncio import random diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py index 623fa16..abf3fb2 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py @@ -520,9 +520,11 @@ def __iter__(self): def seekable(self): return False - def next(self): + def __next__(self): return next(self.iterator) + next = __next__ # Python 2 compatibility. + def tell(self, *args, **kwargs): raise UnsupportedOperation("Data generator does not support tell.") @@ -534,7 +536,7 @@ def read(self, size): count = len(self.leftover) try: while count < size: - chunk = self.next() + chunk = self.__next__() if isinstance(chunk, six.text_type): chunk = chunk.encode(self.encoding) data += chunk diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_version.py b/azure/multiapi/storagev2/queue/v2018_03_28/_version.py index b8f9775..100d81d 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_version.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_version.py @@ -9,4 +9,4 @@ # regenerated. # -------------------------------------------------------------------------- -VERSION = "12.1.2" +VERSION = "12.1.3" diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_client_async.py b/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_client_async.py index cc5851e..21e7105 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_client_async.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_client_async.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method import functools from typing import ( # pylint: disable=unused-import diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_service_client_async.py b/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_service_client_async.py index 4d32d1f..42d0d45 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_service_client_async.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_service_client_async.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method import functools from typing import ( # pylint: disable=unused-import diff --git a/setup.py b/setup.py index 1b50497..b99f01a 100644 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ setup( name='azure-multiapi-storage', - version='0.4.1', + version='0.5.0', description='Microsoft Azure Storage Client Library for Python with multi API version support.', long_description=open('README.rst', 'r').read(), license='MIT',
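
With the package version bumped to 0.5.0, the vendored 2020-02-10 clients above become addressable alongside the existing API versions. A minimal sketch of picking up the new file share surface, assuming the aio subpackage re-exports ShareServiceClient as the modules in this patch define it; the account URL and SAS token are placeholders:

    from azure.multiapi.storagev2.fileshare.v2020_02_10.aio import ShareServiceClient

    # Placeholders -- substitute a real account URL and credential.
    service = ShareServiceClient(
        account_url="https://<account>.file.core.windows.net",
        credential="<sas-token>",
    )

    # The api_version keyword documented on the clients can still pin an
    # older service version within this vendored package.
    pinned = ShareServiceClient(
        account_url="https://<account>.file.core.windows.net",
        credential="<sas-token>",
        api_version="2019-07-07",
    )
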