- 新增图像生成接口,支持试用、积分和自定义API Key模式 - 实现生成图片结果异步上传至MinIO存储,带重试机制 - 优化积分预扣除和异常退还逻辑,保障用户积分准确 - 添加获取生成历史记录接口,支持时间范围和分页 - 提供本地字典配置接口,支持模型、比例、提示模板和尺寸 - 实现图片批量上传接口,支持S3兼容对象存储 feat(admin): 增加管理员角色管理与权限分配接口 - 实现角色列表查询、角色创建、更新及删除功能 - 增加权限列表查询接口 - 实现用户角色分配接口,便于统一管理用户权限 - 增加系统字典增删查改接口,支持分类过滤和排序 - 权限控制全面覆盖管理接口,保证安全访问 feat(auth): 完善用户登录注册及权限相关接口与页面 - 实现手机号验证码发送及校验功能,保障注册安全 - 支持手机号注册、登录及退出接口,集成日志记录 - 增加修改密码功能,验证原密码后更新 - 提供动态导航菜单接口,基于权限展示不同菜单 - 实现管理界面路由及日志、角色、字典管理页面访问权限控制 - 添加系统日志查询接口,支持关键词和等级筛选 feat(app): 初始化Flask应用并配置蓝图与数据库 - 创建应用程序工厂,加载配置,初始化数据库和Redis客户端 - 注册认证、API及管理员蓝图,整合路由 - 根路由渲染主页模板 - 应用上下文中自动创建数据库表,保证运行环境准备完毕 feat(database): 提供数据库创建与迁移支持脚本 - 新增数据库创建脚本,支持自动检测是否已存在 - 添加数据库表初始化脚本,支持创建和删除所有表 - 实现RBAC权限初始化,包含基础权限和角色创建 - 新增字段手动修复脚本,添加用户API Key和积分字段 - 强制迁移脚本支持清理连接和修复表结构,初始化默认数据及角色分配 feat(config): 新增系统配置参数 - 配置数据库、Redis、Session和MinIO相关参数 - 添加AI接口地址及试用Key配置 - 集成阿里云短信服务配置及开发模式相关参数 feat(extensions): 初始化数据库、Redis和MinIO客户端 - 创建全局SQLAlchemy数据库实例和Redis客户端 - 配置基于boto3的MinIO兼容S3客户端 chore(logs): 添加示例系统日志文件 - 记录用户请求、验证码发送成功与失败的日志信息
360 lines
12 KiB
Python
360 lines
12 KiB
Python
import asyncio
|
|
from contextlib import suppress
|
|
from typing import Any, Optional, Tuple, Union
|
|
|
|
from .base_protocol import BaseProtocol
|
|
from .client_exceptions import (
|
|
ClientConnectionError,
|
|
ClientOSError,
|
|
ClientPayloadError,
|
|
ServerDisconnectedError,
|
|
SocketTimeoutError,
|
|
)
|
|
from .helpers import (
|
|
_EXC_SENTINEL,
|
|
EMPTY_BODY_STATUS_CODES,
|
|
BaseTimerContext,
|
|
set_exception,
|
|
set_result,
|
|
)
|
|
from .http import HttpResponseParser, RawResponseMessage
|
|
from .http_exceptions import HttpProcessingError
|
|
from .streams import EMPTY_PAYLOAD, DataQueue, StreamReader
|
|
|
|
|
|
class ResponseHandler(BaseProtocol, DataQueue[Tuple[RawResponseMessage, StreamReader]]):
    """Helper class to adapt between Protocol and StreamReader.

    Receives raw bytes from the transport, feeds them through an
    ``HttpResponseParser`` (or, after a connection upgrade, a custom payload
    parser), and queues ``(message, payload)`` pairs for consumers.
    """

    def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
        BaseProtocol.__init__(self, loop=loop)
        DataQueue.__init__(self, loop)

        # True once the connection must not be reused for another request.
        self._should_close = False

        self._payload: Optional[StreamReader] = None
        self._skip_payload = False
        self._payload_parser = None

        self._timer = None

        # Bytes received before a parser was installed; replayed once a
        # parser is set (set_parser / set_response_params).
        self._tail = b""
        self._upgraded = False
        self._parser: Optional[HttpResponseParser] = None

        self._read_timeout: Optional[float] = None
        self._read_timeout_handle: Optional[asyncio.TimerHandle] = None

        self._timeout_ceil_threshold: Optional[float] = 5

        # Lazily-created future resolved in connection_lost(); stays None
        # until the ``closed`` property is first accessed.
        self._closed: Union[None, asyncio.Future[None]] = None
        self._connection_lost_called = False

    @property
    def closed(self) -> Union[None, asyncio.Future[None]]:
        """Future that is set when the connection is closed.

        This property returns a Future that will be completed when the connection
        is closed. The Future is created lazily on first access to avoid creating
        futures that will never be awaited.

        Returns:
            - A Future[None] if the connection is still open or was closed after
              this property was accessed
            - None if connection_lost() was already called before this property
              was ever accessed (indicating no one is waiting for the closure)
        """
        if self._closed is None and not self._connection_lost_called:
            self._closed = self._loop.create_future()
        return self._closed

    @property
    def upgraded(self) -> bool:
        """True after the parser reported a protocol upgrade (e.g. WebSocket)."""
        return self._upgraded

    @property
    def should_close(self) -> bool:
        """Whether this connection must be closed instead of returned to a pool.

        Any unread payload, pending exception, buffered/unparsed bytes, or an
        installed custom payload parser makes the connection unsafe to reuse.
        """
        return bool(
            self._should_close
            or (self._payload is not None and not self._payload.is_eof())
            or self._upgraded
            or self._exception is not None
            or self._payload_parser is not None
            or self._buffer
            or self._tail
        )

    def force_close(self) -> None:
        """Mark the connection as non-reusable."""
        self._should_close = True

    def close(self) -> None:
        """Close the transport gracefully and drop per-connection state."""
        self._exception = None  # Break cyclic references
        transport = self.transport
        if transport is not None:
            transport.close()
            self.transport = None
            self._payload = None
        self._drop_timeout()

    def abort(self) -> None:
        """Abort the transport immediately (no flush) and drop state."""
        self._exception = None  # Break cyclic references
        transport = self.transport
        if transport is not None:
            transport.abort()
            self.transport = None
            self._payload = None
        self._drop_timeout()

    def is_connected(self) -> bool:
        """True while a transport exists and is not in the process of closing."""
        return self.transport is not None and not self.transport.is_closing()

    def connection_lost(self, exc: Optional[BaseException]) -> None:
        """Handle transport loss: resolve waiters, flush parsers, set errors.

        *exc* is None for a clean EOF; otherwise it is the underlying
        transport error, which is wrapped and propagated to anyone waiting
        on ``closed``, the current payload, or the message queue.
        """
        self._connection_lost_called = True
        self._drop_timeout()

        original_connection_error = exc
        reraised_exc = original_connection_error

        connection_closed_cleanly = original_connection_error is None

        if self._closed is not None:
            # If someone is waiting for the closed future,
            # we should set it to None or an exception. If
            # self._closed is None, it means that
            # connection_lost() was called already
            # or nobody is waiting for it.
            if connection_closed_cleanly:
                set_result(self._closed, None)
            else:
                assert original_connection_error is not None
                set_exception(
                    self._closed,
                    ClientConnectionError(
                        f"Connection lost: {original_connection_error !s}",
                    ),
                    original_connection_error,
                )

        if self._payload_parser is not None:
            with suppress(Exception):  # FIXME: log this somehow?
                self._payload_parser.feed_eof()

        uncompleted = None
        if self._parser is not None:
            try:
                uncompleted = self._parser.feed_eof()
            except Exception as underlying_exc:
                if self._payload is not None:
                    client_payload_exc_msg = (
                        f"Response payload is not completed: {underlying_exc !r}"
                    )
                    if not connection_closed_cleanly:
                        client_payload_exc_msg = (
                            f"{client_payload_exc_msg !s}. "
                            f"{original_connection_error !r}"
                        )
                    set_exception(
                        self._payload,
                        ClientPayloadError(client_payload_exc_msg),
                        underlying_exc,
                    )

        if not self.is_eof():
            if isinstance(original_connection_error, OSError):
                reraised_exc = ClientOSError(*original_connection_error.args)
            if connection_closed_cleanly:
                reraised_exc = ServerDisconnectedError(uncompleted)
            # assigns self._should_close to True as side effect,
            # we do it anyway below
            underlying_non_eof_exc = (
                _EXC_SENTINEL
                if connection_closed_cleanly
                else original_connection_error
            )
            assert underlying_non_eof_exc is not None
            assert reraised_exc is not None
            self.set_exception(reraised_exc, underlying_non_eof_exc)

        self._should_close = True
        self._parser = None
        self._payload = None
        self._payload_parser = None
        self._reading_paused = False

        super().connection_lost(reraised_exc)

    def eof_received(self) -> None:
        # should call parser.feed_eof() most likely
        self._drop_timeout()

    def pause_reading(self) -> None:
        """Pause the transport and suspend the read timeout."""
        super().pause_reading()
        self._drop_timeout()

    def resume_reading(self) -> None:
        """Resume the transport and re-arm the read timeout."""
        super().resume_reading()
        self._reschedule_timeout()

    def set_exception(
        self,
        exc: BaseException,
        exc_cause: BaseException = _EXC_SENTINEL,
    ) -> None:
        """Propagate *exc* to queue consumers and poison the connection."""
        self._should_close = True
        self._drop_timeout()
        super().set_exception(exc, exc_cause)

    def set_parser(self, parser: Any, payload: Any) -> None:
        """Install a custom payload parser (used after a connection upgrade)."""
        # TODO: actual types are:
        #   parser: WebSocketReader
        #   payload: WebSocketDataQueue
        # but they are not generic enough
        # Need an ABC for both types
        self._payload = payload
        self._payload_parser = parser

        self._drop_timeout()

        # Replay any bytes that arrived before the parser was installed.
        if self._tail:
            data, self._tail = self._tail, b""
            self.data_received(data)

    def set_response_params(
        self,
        *,
        timer: Optional[BaseTimerContext] = None,
        skip_payload: bool = False,
        read_until_eof: bool = False,
        auto_decompress: bool = True,
        read_timeout: Optional[float] = None,
        read_bufsize: int = 2**16,
        timeout_ceil_threshold: float = 5,
        max_line_size: int = 8190,
        max_field_size: int = 8190,
    ) -> None:
        """Configure and install the HTTP response parser for the next response.

        Keyword-only parameters mirror the client request options: *skip_payload*
        discards bodies (e.g. HEAD requests), *read_until_eof* allows bodies
        without a framed length, and the size limits bound header parsing.
        """
        self._skip_payload = skip_payload

        self._read_timeout = read_timeout

        self._timeout_ceil_threshold = timeout_ceil_threshold

        self._parser = HttpResponseParser(
            self,
            self._loop,
            read_bufsize,
            timer=timer,
            payload_exception=ClientPayloadError,
            response_with_body=not skip_payload,
            read_until_eof=read_until_eof,
            auto_decompress=auto_decompress,
            max_line_size=max_line_size,
            max_field_size=max_field_size,
        )

        # Replay any bytes that arrived before the parser was installed.
        if self._tail:
            data, self._tail = self._tail, b""
            self.data_received(data)

    def _drop_timeout(self) -> None:
        """Cancel the pending read-timeout callback, if any."""
        if self._read_timeout_handle is not None:
            self._read_timeout_handle.cancel()
            self._read_timeout_handle = None

    def _reschedule_timeout(self) -> None:
        """Restart the read timeout from now (no-op if no timeout configured)."""
        timeout = self._read_timeout
        if self._read_timeout_handle is not None:
            self._read_timeout_handle.cancel()

        if timeout:
            self._read_timeout_handle = self._loop.call_later(
                timeout, self._on_read_timeout
            )
        else:
            self._read_timeout_handle = None

    def start_timeout(self) -> None:
        """Arm the read timeout (called when a request starts waiting)."""
        self._reschedule_timeout()

    @property
    def read_timeout(self) -> Optional[float]:
        """Currently configured per-read timeout in seconds, or None."""
        return self._read_timeout

    @read_timeout.setter
    def read_timeout(self, read_timeout: Optional[float]) -> None:
        self._read_timeout = read_timeout

    def _on_read_timeout(self) -> None:
        """Fail the connection and the in-flight payload with a socket timeout."""
        exc = SocketTimeoutError("Timeout on reading data from socket")
        self.set_exception(exc)
        if self._payload is not None:
            set_exception(self._payload, exc)

    def data_received(self, data: bytes) -> None:
        """Feed raw transport bytes to the active parser.

        Dispatch order: custom payload parser (post-upgrade) first; otherwise
        buffer into ``_tail`` while no HTTP parser is set; otherwise parse HTTP
        messages and enqueue ``(message, payload)`` pairs.
        """
        self._reschedule_timeout()

        if not data:
            return

        # custom payload parser - currently always WebSocketReader
        if self._payload_parser is not None:
            eof, tail = self._payload_parser.feed_data(data)
            if eof:
                self._payload = None
                self._payload_parser = None

                if tail:
                    self.data_received(tail)
            return

        if self._upgraded or self._parser is None:
            # i.e. websocket connection, websocket parser is not set yet
            self._tail += data
            return

        # parse http messages
        try:
            messages, upgraded, tail = self._parser.feed_data(data)
        except BaseException as underlying_exc:
            if self.transport is not None:
                # connection.release() could be called BEFORE
                # data_received(), the transport is already
                # closed in this case
                self.transport.close()
            # should_close is True after the call
            if isinstance(underlying_exc, HttpProcessingError):
                exc = HttpProcessingError(
                    code=underlying_exc.code,
                    message=underlying_exc.message,
                    headers=underlying_exc.headers,
                )
            else:
                exc = HttpProcessingError()
            self.set_exception(exc, underlying_exc)
            return

        self._upgraded = upgraded

        payload: Optional[StreamReader] = None
        for message, payload in messages:
            if message.should_close:
                self._should_close = True

            self._payload = payload

            if self._skip_payload or message.code in EMPTY_BODY_STATUS_CODES:
                self.feed_data((message, EMPTY_PAYLOAD), 0)
            else:
                self.feed_data((message, payload), 0)

        if payload is not None:
            # new message(s) was processed
            # register timeout handler unsubscribing
            # either on end-of-stream or immediately for
            # EMPTY_PAYLOAD
            if payload is not EMPTY_PAYLOAD:
                payload.on_eof(self._drop_timeout)
            else:
                self._drop_timeout()

        if upgraded and tail:
            self.data_received(tail)