diff --git a/changelogs/fragments/54435_aws_s3_fix_removing_versioned_buckets.yaml b/changelogs/fragments/54435_aws_s3_fix_removing_versioned_buckets.yaml new file mode 100644 index 00000000000..fa359f648bc --- /dev/null +++ b/changelogs/fragments/54435_aws_s3_fix_removing_versioned_buckets.yaml @@ -0,0 +1,2 @@ +bugfixes: + - aws_s3 - Delete objects and delete markers so versioned buckets can be removed. diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.10.rst b/docs/docsite/rst/porting_guides/porting_guide_2.10.rst index 452c3abfd3b..2614afdbb02 100644 --- a/docs/docsite/rst/porting_guides/porting_guide_2.10.rst +++ b/docs/docsite/rst/porting_guides/porting_guide_2.10.rst @@ -122,6 +122,7 @@ Noteworthy module changes * :ref:`docker_container `'s support for port ranges was adjusted to be more compatible to the ``docker`` command line utility: a one-port container range combined with a multiple-port host range will no longer result in only the first host port be used, but the whole range being passed to Docker so that a free port in that range will be used. * :ref:`purefb_fs ` no longer supports the deprecated ``nfs`` option. This has been superceeded by ``nfsv3``. * :ref:`nxos_igmp_interface ` no longer supports the deprecated ``oif_prefix`` and ``oif_source`` options. These have been superceeded by ``oif_ps``. +* :ref:`aws_s3 ` can now delete versioned buckets even when they are not empty - set ``mode`` to ``delete`` to remove a versioned bucket along with all of its contents. 
Plugins diff --git a/lib/ansible/modules/cloud/amazon/aws_s3.py b/lib/ansible/modules/cloud/amazon/aws_s3.py index ff33ee11f1d..54874f05cec 100644 --- a/lib/ansible/modules/cloud/amazon/aws_s3.py +++ b/lib/ansible/modules/cloud/amazon/aws_s3.py @@ -410,6 +410,19 @@ def paginated_list(s3, **pagination_params): yield [data['Key'] for data in page.get('Contents', [])] +def paginated_versioned_list_with_fallback(s3, **pagination_params): + try: + versioned_pg = s3.get_paginator('list_object_versions') + for page in versioned_pg.paginate(**pagination_params): + delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])] + current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])] + yield delete_markers + current_objects + except botocore.exceptions.ClientError as e: + if to_text(e.response['Error']['Code']) in IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']: + for page in paginated_list(s3, **pagination_params): + yield [{'Key': data['Key']} for data in page] + + def list_keys(module, s3, bucket, prefix, marker, max_keys): pagination_params = {'Bucket': bucket} for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)): @@ -429,10 +442,9 @@ def delete_bucket(module, s3, bucket): if exists is False: return False # if there are contents then we need to delete them before we can delete the bucket - for keys in paginated_list(s3, Bucket=bucket): - formatted_keys = [{'Key': key} for key in keys] - if formatted_keys: - s3.delete_objects(Bucket=bucket, Delete={'Objects': formatted_keys}) + for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket): + if keys: + s3.delete_objects(Bucket=bucket, Delete={'Objects': keys}) s3.delete_bucket(Bucket=bucket) return True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: