- 新增图像生成接口,支持试用、积分和自定义API Key模式 - 实现生成图片结果异步上传至MinIO存储,带重试机制 - 优化积分预扣除和异常退还逻辑,保障用户积分准确 - 添加获取生成历史记录接口,支持时间范围和分页 - 提供本地字典配置接口,支持模型、比例、提示模板和尺寸 - 实现图片批量上传接口,支持S3兼容对象存储 feat(admin): 增加管理员角色管理与权限分配接口 - 实现角色列表查询、角色创建、更新及删除功能 - 增加权限列表查询接口 - 实现用户角色分配接口,便于统一管理用户权限 - 增加系统字典增删查改接口,支持分类过滤和排序 - 权限控制全面覆盖管理接口,保证安全访问 feat(auth): 完善用户登录注册及权限相关接口与页面 - 实现手机号验证码发送及校验功能,保障注册安全 - 支持手机号注册、登录及退出接口,集成日志记录 - 增加修改密码功能,验证原密码后更新 - 提供动态导航菜单接口,基于权限展示不同菜单 - 实现管理界面路由及日志、角色、字典管理页面访问权限控制 - 添加系统日志查询接口,支持关键词和等级筛选 feat(app): 初始化Flask应用并配置蓝图与数据库 - 创建应用程序工厂,加载配置,初始化数据库和Redis客户端 - 注册认证、API及管理员蓝图,整合路由 - 根路由渲染主页模板 - 应用上下文中自动创建数据库表,保证运行环境准备完毕 feat(database): 提供数据库创建与迁移支持脚本 - 新增数据库创建脚本,支持自动检测是否已存在 - 添加数据库表初始化脚本,支持创建和删除所有表 - 实现RBAC权限初始化,包含基础权限和角色创建 - 新增字段手动修复脚本,添加用户API Key和积分字段 - 强制迁移脚本支持清理连接和修复表结构,初始化默认数据及角色分配 feat(config): 新增系统配置参数 - 配置数据库、Redis、Session和MinIO相关参数 - 添加AI接口地址及试用Key配置 - 集成阿里云短信服务配置及开发模式相关参数 feat(extensions): 初始化数据库、Redis和MinIO客户端 - 创建全局SQLAlchemy数据库实例和Redis客户端 - 配置基于boto3的MinIO兼容S3客户端 chore(logs): 添加示例系统日志文件 - 记录用户请求、验证码发送成功与失败的日志信息
414 lines · 15 KiB · Python
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
|
# may not use this file except in compliance with the License. A copy of
|
|
# the License is located at
|
|
#
|
|
# http://aws.amazon.com/apache2.0/
|
|
#
|
|
# or in the "license" file accompanying this file. This file is
|
|
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
|
# ANY KIND, either express or implied. See the License for the specific
|
|
# language governing permissions and limitations under the License.
|
|
import copy
|
|
import math
|
|
|
|
from botocore.exceptions import ClientError
|
|
|
|
from s3transfer.exceptions import S3CopyFailedError
|
|
from s3transfer.tasks import (
|
|
CompleteMultipartUploadTask,
|
|
CreateMultipartUploadTask,
|
|
SubmissionTask,
|
|
Task,
|
|
)
|
|
from s3transfer.utils import (
|
|
ChunksizeAdjuster,
|
|
calculate_range_parameter,
|
|
get_callbacks,
|
|
get_filtered_dict,
|
|
)
|
|
|
|
|
|
class CopySubmissionTask(SubmissionTask):
    """Task for submitting tasks to execute a copy"""

    # CopyObject extra args translated to their HeadObject equivalents so
    # the preliminary HEAD (used to discover the source size/ETag) honors
    # the same preconditions, SSE-C source key, request payer, and
    # expected bucket owner as the copy itself.
    EXTRA_ARGS_TO_HEAD_ARGS_MAPPING = {
        'CopySourceIfMatch': 'IfMatch',
        'CopySourceIfModifiedSince': 'IfModifiedSince',
        'CopySourceIfNoneMatch': 'IfNoneMatch',
        'CopySourceIfUnmodifiedSince': 'IfUnmodifiedSince',
        'CopySourceSSECustomerKey': 'SSECustomerKey',
        'CopySourceSSECustomerAlgorithm': 'SSECustomerAlgorithm',
        'CopySourceSSECustomerKeyMD5': 'SSECustomerKeyMD5',
        'RequestPayer': 'RequestPayer',
        'ExpectedBucketOwner': 'ExpectedBucketOwner',
    }

    # The subset of extra args forwarded to each UploadPartCopy request
    # of a multipart copy.
    UPLOAD_PART_COPY_ARGS = [
        'CopySourceIfMatch',
        'CopySourceIfModifiedSince',
        'CopySourceIfNoneMatch',
        'CopySourceIfUnmodifiedSince',
        'CopySourceSSECustomerKey',
        'CopySourceSSECustomerAlgorithm',
        'CopySourceSSECustomerKeyMD5',
        'SSECustomerKey',
        'SSECustomerAlgorithm',
        'SSECustomerKeyMD5',
        'RequestPayer',
        'ExpectedBucketOwner',
    ]

    # Extra args that must NOT be sent to CreateMultipartUpload; they
    # apply only to the per-part copy calls (or to a single-shot copy).
    CREATE_MULTIPART_ARGS_BLACKLIST = [
        'CopySourceIfMatch',
        'CopySourceIfModifiedSince',
        'CopySourceIfNoneMatch',
        'CopySourceIfUnmodifiedSince',
        'CopySourceSSECustomerKey',
        'CopySourceSSECustomerAlgorithm',
        'CopySourceSSECustomerKeyMD5',
        'MetadataDirective',
        'TaggingDirective',
    ]

    # The subset of extra args forwarded to CompleteMultipartUpload.
    COMPLETE_MULTIPART_ARGS = [
        'SSECustomerKey',
        'SSECustomerAlgorithm',
        'SSECustomerKeyMD5',
        'RequestPayer',
        'ExpectedBucketOwner',
    ]

    def _submit(
        self, client, config, osutil, request_executor, transfer_future
    ):
        """Submit the request(s) needed to perform the copy.

        Discovers the source object's size/ETag via HeadObject when not
        already provided, then dispatches to a single CopyObject or a
        multipart copy based on the configured threshold.

        :param client: The client associated with the transfer manager

        :type config: s3transfer.manager.TransferConfig
        :param config: The transfer config associated with the transfer
            manager

        :type osutil: s3transfer.utils.OSUtil
        :param osutil: The os utility associated to the transfer manager

        :type request_executor: s3transfer.futures.BoundedExecutor
        :param request_executor: The request executor associated with the
            transfer manager

        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The transfer future associated with the
            transfer request that tasks are being submitted for
        """
        if (
            transfer_future.meta.size is None
            or transfer_future.meta.etag is None
        ):
            # If a size was not provided figure out the size for the
            # user. Note that we will only use the client provided to
            # the TransferManager. If the object is outside of the region
            # of the client, they may have to provide the file size themselves
            # with a completely new client.
            call_args = transfer_future.meta.call_args
            head_object_request = (
                self._get_head_object_request_from_copy_source(
                    call_args.copy_source
                )
            )
            extra_args = call_args.extra_args

            # Map any values that may be used in the head object that is
            # used in the copy object
            for param, value in extra_args.items():
                if param in self.EXTRA_ARGS_TO_HEAD_ARGS_MAPPING:
                    head_object_request[
                        self.EXTRA_ARGS_TO_HEAD_ARGS_MAPPING[param]
                    ] = value

            # Note the HEAD goes through the *source* client, which may
            # differ from the client used for the copy requests.
            response = call_args.source_client.head_object(
                **head_object_request
            )
            transfer_future.meta.provide_transfer_size(
                response['ContentLength']
            )
            # Provide an etag to ensure a stored object is not modified
            # during a multipart copy.
            transfer_future.meta.provide_object_etag(response.get('ETag'))

        # If it is greater than threshold do a multipart copy, otherwise
        # do a regular copy object.
        if transfer_future.meta.size < config.multipart_threshold:
            self._submit_copy_request(
                client, config, osutil, request_executor, transfer_future
            )
        else:
            self._submit_multipart_request(
                client, config, osutil, request_executor, transfer_future
            )

    def _submit_copy_request(
        self, client, config, osutil, request_executor, transfer_future
    ):
        """Submit a single-request (non-multipart) CopyObject task."""
        call_args = transfer_future.meta.call_args

        # Get the needed progress callbacks for the task
        progress_callbacks = get_callbacks(transfer_future, 'progress')

        # Submit the request of a single copy.
        self._transfer_coordinator.submit(
            request_executor,
            CopyObjectTask(
                transfer_coordinator=self._transfer_coordinator,
                main_kwargs={
                    'client': client,
                    'copy_source': call_args.copy_source,
                    'bucket': call_args.bucket,
                    'key': call_args.key,
                    'extra_args': call_args.extra_args,
                    'callbacks': progress_callbacks,
                    'size': transfer_future.meta.size,
                },
                is_final=True,
            ),
        )

    def _submit_multipart_request(
        self, client, config, osutil, request_executor, transfer_future
    ):
        """Submit the create/copy-part/complete tasks of a multipart copy.

        The CreateMultipartUpload future is threaded into every part task
        (and the completion task) via ``pending_main_kwargs`` so those
        tasks only run once the upload id is available.
        """
        call_args = transfer_future.meta.call_args

        # Submit the request to create a multipart upload and make sure it
        # does not include any of the arguments used for copy part.
        create_multipart_extra_args = {}
        for param, val in call_args.extra_args.items():
            if param not in self.CREATE_MULTIPART_ARGS_BLACKLIST:
                create_multipart_extra_args[param] = val

        create_multipart_future = self._transfer_coordinator.submit(
            request_executor,
            CreateMultipartUploadTask(
                transfer_coordinator=self._transfer_coordinator,
                main_kwargs={
                    'client': client,
                    'bucket': call_args.bucket,
                    'key': call_args.key,
                    'extra_args': create_multipart_extra_args,
                },
            ),
        )

        # Determine how many parts are needed based on filesize and
        # desired chunksize.
        part_size = config.multipart_chunksize
        adjuster = ChunksizeAdjuster()
        part_size = adjuster.adjust_chunksize(
            part_size, transfer_future.meta.size
        )
        num_parts = int(
            math.ceil(transfer_future.meta.size / float(part_size))
        )

        # Submit requests to upload the parts of the file.
        part_futures = []
        progress_callbacks = get_callbacks(transfer_future, 'progress')

        for part_number in range(1, num_parts + 1):
            extra_part_args = self._extra_upload_part_args(
                call_args.extra_args
            )
            # The part number for upload part starts at 1 while the
            # range parameter starts at zero, so just subtract 1 off of
            # the part number
            extra_part_args['CopySourceRange'] = calculate_range_parameter(
                part_size,
                part_number - 1,
                num_parts,
                transfer_future.meta.size,
            )
            # Pin each part copy to the ETag observed up front so a
            # concurrent overwrite of the source fails the copy rather
            # than producing a mixed object.
            if transfer_future.meta.etag is not None:
                extra_part_args['CopySourceIfMatch'] = (
                    transfer_future.meta.etag
                )
            # Get the size of the part copy as well for the progress
            # callbacks.
            size = self._get_transfer_size(
                part_size,
                part_number - 1,
                num_parts,
                transfer_future.meta.size,
            )
            # Get the checksum algorithm of the multipart request.
            checksum_algorithm = call_args.extra_args.get("ChecksumAlgorithm")
            part_futures.append(
                self._transfer_coordinator.submit(
                    request_executor,
                    CopyPartTask(
                        transfer_coordinator=self._transfer_coordinator,
                        main_kwargs={
                            'client': client,
                            'copy_source': call_args.copy_source,
                            'bucket': call_args.bucket,
                            'key': call_args.key,
                            'part_number': part_number,
                            'extra_args': extra_part_args,
                            'callbacks': progress_callbacks,
                            'size': size,
                            'checksum_algorithm': checksum_algorithm,
                        },
                        pending_main_kwargs={
                            'upload_id': create_multipart_future
                        },
                    ),
                )
            )

        complete_multipart_extra_args = self._extra_complete_multipart_args(
            call_args.extra_args
        )
        # Submit the request to complete the multipart upload.
        self._transfer_coordinator.submit(
            request_executor,
            CompleteMultipartUploadTask(
                transfer_coordinator=self._transfer_coordinator,
                main_kwargs={
                    'client': client,
                    'bucket': call_args.bucket,
                    'key': call_args.key,
                    'extra_args': complete_multipart_extra_args,
                },
                pending_main_kwargs={
                    'upload_id': create_multipart_future,
                    'parts': part_futures,
                },
                is_final=True,
            ),
        )

    def _get_head_object_request_from_copy_source(self, copy_source):
        """Return a shallow copy of *copy_source* usable as HeadObject kwargs.

        :raises TypeError: if *copy_source* is not a dictionary.
        """
        if isinstance(copy_source, dict):
            # Shallow copy so header mapping mutations do not leak back
            # into the caller's copy_source.
            return copy.copy(copy_source)
        else:
            raise TypeError(
                'Expecting dictionary formatted: '
                '{"Bucket": bucket_name, "Key": key} '
                f'but got {copy_source} or type {type(copy_source)}.'
            )

    def _extra_upload_part_args(self, extra_args):
        """Filter *extra_args* down to those valid for UploadPartCopy."""
        # Only the args in COPY_PART_ARGS actually need to be passed
        # onto the upload_part_copy calls.
        return get_filtered_dict(extra_args, self.UPLOAD_PART_COPY_ARGS)

    def _extra_complete_multipart_args(self, extra_args):
        """Filter *extra_args* down to those valid for CompleteMultipartUpload."""
        return get_filtered_dict(extra_args, self.COMPLETE_MULTIPART_ARGS)

    def _get_transfer_size(
        self, part_size, part_index, num_parts, total_transfer_size
    ):
        """Return the byte size of the part at *part_index* (0-based)."""
        if part_index == num_parts - 1:
            # The last part may be different in size then the rest of the
            # parts.
            return total_transfer_size - (part_index * part_size)
        return part_size
|
|
class CopyObjectTask(Task):
    """Task to do a nonmultipart copy"""

    def _main(
        self, client, copy_source, bucket, key, extra_args, callbacks, size
    ):
        """Perform the copy with a single CopyObject request.

        :param client: The client to use when calling PutObject
        :param copy_source: The CopySource parameter to use
        :param bucket: The name of the bucket to copy to
        :param key: The name of the key to copy to
        :param extra_args: A dictionary of any extra arguments that may be
            used in the upload.
        :param callbacks: List of callbacks to call after copy
        :param size: The size of the transfer. This value is passed into
            the callbacks

        """
        required_kwargs = {
            'CopySource': copy_source,
            'Bucket': bucket,
            'Key': key,
        }
        # Double-star expansion keeps a duplicate key in extra_args a
        # TypeError at call time, matching explicit keyword arguments.
        client.copy_object(**required_kwargs, **extra_args)
        # Report the whole object size once the copy has succeeded.
        for progress_callback in callbacks:
            progress_callback(bytes_transferred=size)
|
|
class CopyPartTask(Task):
    """Task to upload a part in a multipart copy"""

    def _main(
        self,
        client,
        copy_source,
        bucket,
        key,
        upload_id,
        part_number,
        extra_args,
        callbacks,
        size,
        checksum_algorithm=None,
    ):
        """Copy one part of a multipart copy via UploadPartCopy.

        :param client: The client to use when calling PutObject
        :param copy_source: The CopySource parameter to use
        :param bucket: The name of the bucket to upload to
        :param key: The name of the key to upload to
        :param upload_id: The id of the upload
        :param part_number: The number representing the part of the multipart
            upload
        :param extra_args: A dictionary of any extra arguments that may be
            used in the upload.
        :param callbacks: List of callbacks to call after copy part
        :param size: The size of the transfer. This value is passed into
            the callbacks
        :param checksum_algorithm: The algorithm that was used to create the multipart
            upload

        :rtype: dict
        :returns: A dictionary representing a part::

            {'Etag': etag_value, 'PartNumber': part_number}

            This value can be appended to a list to be used to complete
            the multipart upload. If a checksum is in the response,
            it will also be included.
        """
        try:
            response = client.upload_part_copy(
                CopySource=copy_source,
                Bucket=bucket,
                Key=key,
                UploadId=upload_id,
                PartNumber=part_number,
                **extra_args,
            )
        except ClientError as client_error:
            failure_code = (
                client_error.response.get('Error', {}).get('Code')
            )
            # Extracted unconditionally (as before) so a malformed
            # copy_source surfaces the same way on any ClientError.
            source_key = copy_source['Key']
            source_bucket = copy_source['Bucket']
            if failure_code != "PreconditionFailed":
                raise
            # The CopySourceIfMatch precondition failed: the source object
            # changed mid-copy, so surface a dedicated error.
            raise S3CopyFailedError(
                f'Contents of stored object "{source_key}" in bucket '
                f'"{source_bucket}" did not match expected ETag.'
            )
        for progress_callback in callbacks:
            progress_callback(bytes_transferred=size)
        copy_part_result = response['CopyPartResult']
        part_metadata = {
            'ETag': copy_part_result['ETag'],
            'PartNumber': part_number,
        }
        if checksum_algorithm:
            checksum_member = f'Checksum{checksum_algorithm.upper()}'
            # Only include the checksum if the service echoed one back.
            if checksum_member in copy_part_result:
                part_metadata[checksum_member] = (
                    copy_part_result[checksum_member]
                )
        return part_metadata