- 新增图像生成接口,支持试用、积分和自定义API Key模式 - 实现生成图片结果异步上传至MinIO存储,带重试机制 - 优化积分预扣除和异常退还逻辑,保障用户积分准确 - 添加获取生成历史记录接口,支持时间范围和分页 - 提供本地字典配置接口,支持模型、比例、提示模板和尺寸 - 实现图片批量上传接口,支持S3兼容对象存储 feat(admin): 增加管理员角色管理与权限分配接口 - 实现角色列表查询、角色创建、更新及删除功能 - 增加权限列表查询接口 - 实现用户角色分配接口,便于统一管理用户权限 - 增加系统字典增删查改接口,支持分类过滤和排序 - 权限控制全面覆盖管理接口,保证安全访问 feat(auth): 完善用户登录注册及权限相关接口与页面 - 实现手机号验证码发送及校验功能,保障注册安全 - 支持手机号注册、登录及退出接口,集成日志记录 - 增加修改密码功能,验证原密码后更新 - 提供动态导航菜单接口,基于权限展示不同菜单 - 实现管理界面路由及日志、角色、字典管理页面访问权限控制 - 添加系统日志查询接口,支持关键词和等级筛选 feat(app): 初始化Flask应用并配置蓝图与数据库 - 创建应用程序工厂,加载配置,初始化数据库和Redis客户端 - 注册认证、API及管理员蓝图,整合路由 - 根路由渲染主页模板 - 应用上下文中自动创建数据库表,保证运行环境准备完毕 feat(database): 提供数据库创建与迁移支持脚本 - 新增数据库创建脚本,支持自动检测是否已存在 - 添加数据库表初始化脚本,支持创建和删除所有表 - 实现RBAC权限初始化,包含基础权限和角色创建 - 新增字段手动修复脚本,添加用户API Key和积分字段 - 强制迁移脚本支持清理连接和修复表结构,初始化默认数据及角色分配 feat(config): 新增系统配置参数 - 配置数据库、Redis、Session和MinIO相关参数 - 添加AI接口地址及试用Key配置 - 集成阿里云短信服务配置及开发模式相关参数 feat(extensions): 初始化数据库、Redis和MinIO客户端 - 创建全局SQLAlchemy数据库实例和Redis客户端 - 配置基于boto3的MinIO兼容S3客户端 chore(logs): 添加示例系统日志文件 - 记录用户请求、验证码发送成功与失败的日志信息
209 lines
8.4 KiB
Python
209 lines
8.4 KiB
Python
import string
|
|
import warnings
|
|
from json import loads
|
|
|
|
from jmespath.exceptions import LexerError, EmptyExpressionError
|
|
|
|
|
|
class Lexer(object):
    """Tokenizer for JMESPath expressions.

    ``tokenize`` walks the expression one character at a time and yields
    token dicts of the form ``{'type', 'value', 'start', 'end'}``.
    ``start``/``end`` are absolute offsets into the expression forming a
    half-open interval, so ``end - start`` is always the token's length.
    """

    # Characters that may begin an unquoted identifier.
    START_IDENTIFIER = set(string.ascii_letters + '_')
    # Characters allowed after the first char of an unquoted identifier.
    VALID_IDENTIFIER = set(string.ascii_letters + string.digits + '_')
    # Digits valid inside a number token.
    VALID_NUMBER = set(string.digits)
    # Insignificant whitespace, skipped between tokens.
    WHITESPACE = set(" \t\n\r")
    # Single characters that map directly to a token type.
    SIMPLE_TOKENS = {
        '.': 'dot',
        '*': 'star',
        ']': 'rbracket',
        ',': 'comma',
        ':': 'colon',
        '@': 'current',
        '(': 'lparen',
        ')': 'rparen',
        '{': 'lbrace',
        '}': 'rbrace',
    }

    def tokenize(self, expression):
        """Yield token dicts for *expression*, ending with an ``eof`` token.

        :raises EmptyExpressionError: if *expression* is empty/falsy.
        :raises LexerError: on any unrecognized or malformed token.
        """
        self._initialize_for_expression(expression)
        while self._current is not None:
            if self._current in self.SIMPLE_TOKENS:
                yield {'type': self.SIMPLE_TOKENS[self._current],
                       'value': self._current,
                       'start': self._position, 'end': self._position + 1}
                self._next()
            elif self._current in self.START_IDENTIFIER:
                start = self._position
                buff = self._current
                while self._next() in self.VALID_IDENTIFIER:
                    buff += self._current
                yield {'type': 'unquoted_identifier', 'value': buff,
                       'start': start, 'end': start + len(buff)}
            elif self._current in self.WHITESPACE:
                self._next()
            elif self._current == '[':
                # '[' can begin '[]' (flatten), '[?' (filter projection)
                # or a plain lbracket, so one char of lookahead is needed.
                start = self._position
                next_char = self._next()
                if next_char == ']':
                    self._next()
                    yield {'type': 'flatten', 'value': '[]',
                           'start': start, 'end': start + 2}
                elif next_char == '?':
                    self._next()
                    yield {'type': 'filter', 'value': '[?',
                           'start': start, 'end': start + 2}
                else:
                    yield {'type': 'lbracket', 'value': '[',
                           'start': start, 'end': start + 1}
            elif self._current == "'":
                yield self._consume_raw_string_literal()
            elif self._current == '|':
                yield self._match_or_else('|', 'or', 'pipe')
            elif self._current == '&':
                yield self._match_or_else('&', 'and', 'expref')
            elif self._current == '`':
                yield self._consume_literal()
            elif self._current in self.VALID_NUMBER:
                start = self._position
                buff = self._consume_number()
                yield {'type': 'number', 'value': int(buff),
                       'start': start, 'end': start + len(buff)}
            elif self._current == '-':
                # Negative number; a bare '-' with no digits is an error.
                start = self._position
                buff = self._consume_number()
                if len(buff) > 1:
                    yield {'type': 'number', 'value': int(buff),
                           'start': start, 'end': start + len(buff)}
                else:
                    raise LexerError(lexer_position=start,
                                     lexer_value=buff,
                                     message="Unknown token '%s'" % buff)
            elif self._current == '"':
                yield self._consume_quoted_identifier()
            elif self._current == '<':
                yield self._match_or_else('=', 'lte', 'lt')
            elif self._current == '>':
                yield self._match_or_else('=', 'gte', 'gt')
            elif self._current == '!':
                yield self._match_or_else('=', 'ne', 'not')
            elif self._current == '=':
                if self._next() == '=':
                    # BUGFIX: 'end' previously equalled the position of the
                    # second '='; '==' spans two characters, so the
                    # exclusive end is one past it.
                    yield {'type': 'eq', 'value': '==',
                           'start': self._position - 1,
                           'end': self._position + 1}
                    self._next()
                else:
                    # A lone '=' is not a valid JMESPath token.
                    if self._current is None:
                        # If we're at the EOF, we never advanced
                        # the position so we don't need to rewind
                        # it back one location.
                        position = self._position
                    else:
                        position = self._position - 1
                    raise LexerError(
                        lexer_position=position,
                        lexer_value='=',
                        message="Unknown token '='")
            else:
                raise LexerError(lexer_position=self._position,
                                 lexer_value=self._current,
                                 message="Unknown token %s" % self._current)
        # Sentinel so the parser never runs off the token stream.
        yield {'type': 'eof', 'value': '',
               'start': self._length, 'end': self._length}

    def _consume_number(self):
        # Consume the current character ('-' or a digit) plus any
        # following digits.  (Removed an unused 'start' local.)
        buff = self._current
        while self._next() in self.VALID_NUMBER:
            buff += self._current
        return buff

    def _initialize_for_expression(self, expression):
        # Reset all per-expression lexer state.
        if not expression:
            raise EmptyExpressionError()
        self._position = 0
        self._expression = expression
        self._chars = list(self._expression)
        self._current = self._chars[self._position]
        self._length = len(self._expression)

    def _next(self):
        # Advance one character and return it.  At end of input _current
        # becomes None but _position intentionally stays on the last
        # character (error positions rely on it staying in range).
        if self._position == self._length - 1:
            self._current = None
        else:
            self._position += 1
            self._current = self._chars[self._position]
        return self._current

    def _token_end(self):
        # Absolute (exclusive) end offset of the token just consumed.
        # _next() parks _position on the last character at EOF, so in
        # that case the true end is the full expression length.
        if self._current is None:
            return self._length
        return self._position

    def _consume_until(self, delimiter):
        # Consume until the delimiter is reached,
        # allowing for the delimiter to be escaped with "\".
        start = self._position
        buff = ''
        self._next()
        while self._current != delimiter:
            if self._current == '\\':
                buff += '\\'
                self._next()
            if self._current is None:
                # We hit EOF before the closing delimiter.
                raise LexerError(lexer_position=start,
                                 lexer_value=self._expression[start:],
                                 message="Unclosed %s delimiter" % delimiter)
            buff += self._current
            self._next()
        # Skip the closing delimiter.
        self._next()
        return buff

    def _consume_literal(self):
        # A backtick-delimited JSON literal, e.g. `[1, 2]`.
        start = self._position
        lexeme = self._consume_until('`').replace('\\`', '`')
        try:
            # Assume it is valid JSON and attempt to parse.
            parsed_json = loads(lexeme)
        except ValueError:
            try:
                # Invalid JSON values should be converted to quoted
                # JSON strings during the JEP-12 deprecation period.
                parsed_json = loads('"%s"' % lexeme.lstrip())
                warnings.warn("deprecated string literal syntax",
                              PendingDeprecationWarning)
            except ValueError:
                raise LexerError(lexer_position=start,
                                 lexer_value=self._expression[start:],
                                 message="Bad token %s" % lexeme)
        # BUGFIX: 'end' used to hold the token *length*; report the
        # absolute end offset like every other token.
        return {'type': 'literal', 'value': parsed_json,
                'start': start, 'end': self._token_end()}

    def _consume_quoted_identifier(self):
        # A double-quoted identifier; contents must be a JSON string.
        start = self._position
        lexeme = '"' + self._consume_until('"') + '"'
        try:
            # BUGFIX: 'end' used to hold the token length instead of the
            # absolute end offset.
            return {'type': 'quoted_identifier', 'value': loads(lexeme),
                    'start': start, 'end': self._token_end()}
        except ValueError as e:
            error_message = str(e).split(':')[0]
            raise LexerError(lexer_position=start,
                             lexer_value=lexeme,
                             message=error_message)

    def _consume_raw_string_literal(self):
        # A single-quoted raw string literal; only \' is unescaped.
        start = self._position
        lexeme = self._consume_until("'").replace("\\'", "'")
        # BUGFIX: 'end' used to hold the token length instead of the
        # absolute end offset.
        return {'type': 'literal', 'value': lexeme,
                'start': start, 'end': self._token_end()}

    def _match_or_else(self, expected, match_type, else_type):
        # One character of lookahead: if the next char is *expected*,
        # emit the two-char token, otherwise the single-char fallback.
        start = self._position
        current = self._current
        next_char = self._next()
        if next_char == expected:
            self._next()
            # BUGFIX: a two-char token spans [start, start + 2), not
            # [start, start + 1).
            return {'type': match_type, 'value': current + next_char,
                    'start': start, 'end': start + 2}
        # BUGFIX: a one-char token spans [start, start + 1), not the
        # empty interval [start, start).
        return {'type': else_type, 'value': current,
                'start': start, 'end': start + 1}