I had a similar issue with taking ownership of log objects in a fairly large bucket: 3,290,956 objects, 1.4 TB in total.
The solutions I was able to find were far too sluggish for that number of objects, so I ended up writing some code that does the job several times faster than
aws s3 cp
You will need to install the requirements first:
pip install pathos boto3 click
#!/usr/bin/env python3
import logging
import os
import sys

import boto3
import botocore.exceptions
import click

from time import time
from botocore.config import Config
from pathos.pools import ThreadPool as Pool

logger = logging.getLogger(__name__)
streamformater = logging.Formatter("[*] %(levelname)s: %(asctime)s: %(message)s")
logstreamhandler = logging.StreamHandler()
logstreamhandler.setFormatter(streamformater)


def _set_log_level(ctx, param, value):
    if value:
        ctx.ensure_object(dict)
        ctx.obj["log_level"] = value
        logger.setLevel(value)
        if value <= logging.INFO:
            logger.info(f"Logger set to {logging.getLevelName(logger.getEffectiveLevel())}")
    return value


@click.group(chain=False)
@click.version_option(version='0.1.0')
@click.pass_context
def cli(ctx):
    """
    Take object ownership of S3 bucket objects.
    """
    ctx.ensure_object(dict)
    ctx.obj["aws_config"] = Config(
        retries={
            'max_attempts': 10,
            'mode': 'standard'
        }
    )


@cli.command("own")
@click.argument("bucket", type=click.STRING)
@click.argument("prefix", type=click.STRING, default="/")
@click.option("--profile", type=click.STRING, default="default", envvar="AWS_DEFAULT_PROFILE", help="Configuration profile from ~/.aws/{credentials,config}")
@click.option("--region", type=click.STRING, default="us-east-1", envvar="AWS_DEFAULT_REGION", help="AWS region")
@click.option("--threads", "-t", type=click.INT, default=40, help="Threads to use")
@click.option("--loglevel", "log_level", hidden=True, flag_value=logging.INFO, callback=_set_log_level, expose_value=False, is_eager=True, default=True)
@click.option("--verbose", "-v", "log_level", flag_value=logging.DEBUG, callback=_set_log_level, expose_value=False, is_eager=True, help="Increase log level")
@click.pass_context
def command_own(ctx, *args, **kwargs):
    ctx.obj.update(kwargs)
    profile_name = ctx.obj.get("profile")
    region = ctx.obj.get("region")
    bucket = ctx.obj.get("bucket")
    prefix = ctx.obj.get("prefix").lstrip("/")
    threads = ctx.obj.get("threads")
    pool = Pool(nodes=threads)
    logger.addHandler(logstreamhandler)
    logger.info(f"Getting ownership of all objects in s3://{bucket}/{prefix}")
    start = time()
    try:
        SESSION: boto3.Session = boto3.session.Session(profile_name=profile_name)
    except botocore.exceptions.ProfileNotFound:
        logger.warning(f"Profile {profile_name} was not found.")
        logger.warning("Falling back to environment variables AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN")
        AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "")
        AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "")
        AWS_SESSION_TOKEN = os.environ.get("AWS_SESSION_TOKEN", "")
        if AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
            if AWS_SESSION_TOKEN:
                SESSION: boto3.Session = boto3.session.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,
                                                               aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                                                               aws_session_token=AWS_SESSION_TOKEN)
            else:
                SESSION: boto3.Session = boto3.session.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,
                                                               aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
        else:
            logger.error("Unable to find AWS credentials.")
            sys.exit(1)
    s3c = SESSION.client('s3', region_name=region, config=ctx.obj["aws_config"])

    def bucket_keys(Bucket, Prefix=''):
        # Lazily yield every key under the prefix; pagination keeps memory
        # usage flat even for buckets with millions of objects.
        Prefix = Prefix.lstrip('/')
        for page in s3c.get_paginator('list_objects_v2').paginate(Bucket=Bucket, Prefix=Prefix):
            for content in page.get('Contents', ()):
                yield content['Key']

    def worker(key):
        logger.info(f"Processing: {key}")
        # Copying the object onto itself with a new ACL makes the calling
        # account the owner; setting StorageClass counts as the metadata
        # change S3 requires before it accepts an in-place copy.
        s3c.copy_object(Bucket=bucket, Key=key,
                        CopySource={'Bucket': bucket, 'Key': key},
                        ACL='bucket-owner-full-control',
                        StorageClass="STANDARD")

    object_keys = bucket_keys(bucket, prefix)
    pool.map(worker, object_keys)
    end = time()
    logger.info(f"Completed in {end - start:.2f} seconds.")


if __name__ == '__main__':
    cli()
Usage:
get_object_ownership.py own -v my-big-aws-logs-bucket /prefix
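Under the hood the tool is just doing one in-place copy per key, so if you only need to fix a handful of objects you can skip the tool entirely. A minimal sketch of that single operation (bucket and key here are placeholders):

import boto3

s3 = boto3.client('s3')
# In-place copy: the copy is written by (and therefore owned by) the
# account making this call. StorageClass provides the required "change".
s3.copy_object(Bucket='my-bucket', Key='logs/some-object.gz',  # placeholders
               CopySource={'Bucket': 'my-bucket', 'Key': 'logs/some-object.gz'},
               ACL='bucket-owner-full-control',
               StorageClass='STANDARD')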
The bucket mentioned above was processed in ~7 hours using 40 threads.
[*] INFO: 2021-08-05 19:53:55,542: Completed in 25320.45 seconds.
A further speed comparison, AWS CLI vs. this tool, on the same subset of data:
aws s3 cp --recursive --acl bucket-owner-full-control --metadata-directive REPLACE
53.59s user 7.24s system 20% cpu 5:02.42 total
vs
[*] INFO: 2021-08-06 09:07:43,506: Completed in 49.09 seconds.
ObjectACL only supports files (objects) and buckets, not folders, so you cannot define an ACL for a folder. The simplest solution is to define the ACL at the bucket level. Example: "Resource": "arn:aws:s3:::BUCKET_NAME/*"
– Cruce
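Building on Cruce's point: rather than fixing objects after the fact, you can require the correct ACL at upload time with a bucket-level policy. A sketch of that (BUCKET_NAME is a placeholder, applied with boto3's put_bucket_policy):

import json
import boto3

# Deny any PutObject that doesn't grant the bucket owner full control,
# so new uploads never need per-object ownership fixes afterwards.
policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Sid": "RequireBucketOwnerFullControl",
        "Effect": "Deny",
        "Principal": "*",
        "Action": "s3:PutObject",
        "Resource": "arn:aws:s3:::BUCKET_NAME/*",  # placeholder bucket name
        "Condition": {"StringNotEquals": {"s3:x-amz-acl": "bucket-owner-full-control"}}
    }]
}
boto3.client('s3').put_bucket_policy(Bucket='BUCKET_NAME', Policy=json.dumps(policy))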