There are two ways to resolve this issue:
The first is to check the object size inside the Lambda handler and copy the object directly when it is within the size limit. Here is the sample code:
import boto3

s3 = boto3.resource('s3')

def lambda_handler(event, context):
    # Read the source bucket, object key, and object size from the S3 event record
    src_bucket = event['Records'][0]['s3']['bucket']['name']
    dst_bucket = 'destination-bucket-name'
    key = event['Records'][0]['s3']['object']['key']
    file_size = event['Records'][0]['s3']['object']['size']

    if file_size > 5242880:  # 5 MB
        print('File size greater than 5 MB. Increase Lambda function memory and timeout limits.')
        return 'Lambda function memory and timeout limits too low.'

    # Copy the object from the source bucket to the destination bucket
    copy_source = {
        'Bucket': src_bucket,
        'Key': key
    }
    s3.meta.client.copy(copy_source, dst_bucket, key)
    return 'File successfully copied to destination bucket.'
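To exercise the handler without deploying it, you can call it with a hand-built event that mimics the S3 notification shape; the bucket name, key, and size below are placeholder values, not from the original:

# A minimal test event mimicking an S3 put notification;
# the bucket name, key, and size are placeholders.
test_event = {
    'Records': [{
        's3': {
            'bucket': {'name': 'source-bucket-name'},
            'object': {'key': 'small-file.txt', 'size': 1048576}
        }
    }]
}
print(lambda_handler(test_event, None))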
The second is to upload the file to the destination bucket as a multipart upload, splitting it into 5 MB parts. Here is the sample code:
import boto3
import os

s3 = boto3.client('s3')

def upload_to_s3(bucket_name, file_name):
    # Set the part size (5 MB, the S3 minimum for non-final parts) and the file size
    part_size = 5242880
    file_size = os.path.getsize(file_name)

    # Initiate the multipart upload
    response = s3.create_multipart_upload(
        Bucket=bucket_name,
        Key=file_name
    )
    upload_id = response["UploadId"]
    parts = []

    # Upload the file in parts
    with open(file_name, "rb") as file:
        for i in range(0, file_size, part_size):
            part_number = i // part_size + 1
            data = file.read(part_size)
            response = s3.upload_part(
                Body=data,
                Bucket=bucket_name,
                Key=file_name,
                PartNumber=part_number,
                UploadId=upload_id
            )
            parts.append({
                "PartNumber": part_number,
                "ETag": response["ETag"]
            })

    # Complete the multipart upload
    response = s3.complete_multipart_upload(
        Bucket=bucket_name,
        Key=file_name,
        MultipartUpload={
            "Parts": parts
        },
        UploadId=upload_id
    )
    return response['Location']

upload_to_s3('destination-bucket-name', 'large-file.mp4')
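As a side note, boto3's managed transfer API can perform the same multipart upload automatically once a file crosses a size threshold; a minimal sketch, assuming the same client, bucket, and file names as above:

from boto3.s3.transfer import TransferConfig

# Files of 5 MB or more are split into parts automatically by boto3.
config = TransferConfig(multipart_threshold=5242880)
s3.upload_file('large-file.mp4', 'destination-bucket-name', 'large-file.mp4', Config=config)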