ai_v/venv/Lib/site-packages/apscheduler/jobstores/rethinkdb.py
af7c11d7f9 feat(api): implement image generation and background sync
- Add an image generation endpoint supporting trial, credit, and custom API Key modes
- Upload generated image results to MinIO storage asynchronously, with a retry mechanism (see the sketch after this list)
- Improve the credit pre-deduction and refund-on-failure logic so user balances stay accurate
- Add an endpoint for fetching generation history, with time-range filtering and pagination
- Add a local dictionary configuration endpoint covering models, aspect ratios, prompt templates, and sizes
- Add a batch image upload endpoint backed by S3-compatible object storage
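The commit does not include the upload helper itself; a minimal sketch of the retry-wrapped upload it describes, assuming a boto3 S3 client pointed at MinIO (the names upload_with_retry, s3, and BUCKET are illustrative, not taken from the commit), might look like this:

import time

import boto3
from botocore.exceptions import BotoCoreError, ClientError

# Hypothetical client and bucket; real values would come from the app config.
s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:9000",
    aws_access_key_id="minioadmin",
    aws_secret_access_key="minioadmin",
)
BUCKET = "generated-images"


def upload_with_retry(key, data, retries=3, backoff=1.0):
    """Upload bytes to S3-compatible storage, retrying transient failures."""
    for attempt in range(1, retries + 1):
        try:
            s3.put_object(Bucket=BUCKET, Key=key, Body=data)
            return
        except (BotoCoreError, ClientError):
            if attempt == retries:
                raise  # give up after the last attempt so the caller can refund credits
            time.sleep(backoff * attempt)  # linear backoff between attempts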

feat(admin): add admin role management and permission assignment endpoints

- Implement role listing, creation, update, and deletion
- Add a permission listing endpoint
- Implement user-role assignment so user permissions can be managed centrally
- Add CRUD endpoints for system dictionaries, with category filtering and sorting
- Enforce permission checks across all admin endpoints for secure access (a sketch of such a check follows this list)
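The commit message does not show how the endpoints are guarded; one common Flask pattern, sketched here with hypothetical names (require_permission, g.user, and has_permission are assumptions, not from the commit), is a decorator:

import functools

from flask import abort, g


def require_permission(code):
    """Reject the request with 403 unless the current user holds `code`."""
    def decorator(view):
        @functools.wraps(view)
        def wrapper(*args, **kwargs):
            user = getattr(g, "user", None)
            if user is None or not user.has_permission(code):
                abort(403)
            return view(*args, **kwargs)
        return wrapper
    return decorator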

feat(auth): flesh out user login, registration, and permission-related endpoints and pages

- Send and verify SMS codes for phone numbers to secure registration (see the Redis sketch after this list)
- Support phone-number registration, login, and logout endpoints, with integrated logging
- Add a change-password endpoint that verifies the old password before updating
- Serve a dynamic navigation menu endpoint that shows different menus based on permissions
- Gate the admin routes and the log, role, and dictionary management pages behind permission checks
- Add a system log query endpoint with keyword and level filters
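A typical implementation of the code-verification step keeps a short-lived code in Redis; this sketch assumes the shared client from extensions and invents the key scheme and function names:

import secrets

import redis

r = redis.Redis()  # stand-in for the shared client initialized in extensions


def issue_code(phone, ttl=300):
    """Store a 6-digit code under a per-phone key that expires after `ttl` seconds."""
    code = f"{secrets.randbelow(10**6):06d}"
    r.setex(f"sms:code:{phone}", ttl, code)
    return code


def verify_code(phone, submitted):
    key = f"sms:code:{phone}"
    stored = r.get(key)
    if stored is not None and stored.decode() == submitted:
        r.delete(key)  # codes are single-use
        return True
    return False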

feat(app): initialize the Flask application and wire up blueprints and the database

- Create an application factory that loads the config and initializes the database and Redis client
- Register the auth, API, and admin blueprints to assemble the routes
- Render the home page template at the root route
- Create the database tables automatically inside the app context so the runtime environment is ready (a factory sketch follows this list)
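Putting those bullets together, the factory probably resembles the standard Flask pattern below; the module paths (extensions, auth.routes, and so on) are guesses at the layout, not confirmed by the commit:

from flask import Flask, render_template

from extensions import db  # shared SQLAlchemy instance (see feat(extensions) below)


def create_app():
    app = Flask(__name__)
    app.config.from_object("config.Config")
    db.init_app(app)

    # Register the three blueprints the commit mentions.
    from auth.routes import auth_bp
    from api.routes import api_bp
    from admin.routes import admin_bp
    app.register_blueprint(auth_bp)
    app.register_blueprint(api_bp)
    app.register_blueprint(admin_bp)

    @app.route("/")
    def index():
        return render_template("index.html")

    with app.app_context():
        db.create_all()  # make sure all tables exist on startup
    return app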

feat(database): add database creation and migration support scripts

- Add a database creation script that detects whether the database already exists (sketched after this list)
- Add a table initialization script that can create and drop all tables
- Seed RBAC data, creating the base permissions and roles
- Add a manual column-fix script that adds the user API Key and credit columns
- The force-migration script cleans up connections, repairs table structure, and seeds default data and role assignments
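The create-if-missing check could be as simple as the following; the MySQL/PyMySQL choice and the function name ensure_database are assumptions, since the commit does not name the database engine:

import pymysql


def ensure_database(name, **conn_args):
    """Create the database only if it does not already exist."""
    conn = pymysql.connect(**conn_args)
    try:
        with conn.cursor() as cur:
            cur.execute("SHOW DATABASES")
            existing = {row[0] for row in cur.fetchall()}
            if name not in existing:
                cur.execute(f"CREATE DATABASE `{name}` CHARACTER SET utf8mb4")
        conn.commit()
    finally:
        conn.close()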

feat(config): add system configuration parameters

- Configure the database, Redis, session, and MinIO parameters
- Add the AI endpoint URL and trial Key settings
- Integrate Alibaba Cloud SMS service settings and development-mode parameters (a sketch of such a config class follows)
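A config module matching those categories might look like this; every key name here is illustrative (the commit lists the categories but not the exact names), and secrets are read from the environment rather than hardcoded:

import os


class Config:
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL", "sqlite:///app.db")
    REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0")
    SECRET_KEY = os.environ.get("SECRET_KEY", "dev-only-secret")
    MINIO_ENDPOINT = os.environ.get("MINIO_ENDPOINT", "http://localhost:9000")
    AI_API_BASE = os.environ.get("AI_API_BASE", "")
    AI_TRIAL_KEY = os.environ.get("AI_TRIAL_KEY", "")
    SMS_ACCESS_KEY_ID = os.environ.get("SMS_ACCESS_KEY_ID", "")
    SMS_ACCESS_KEY_SECRET = os.environ.get("SMS_ACCESS_KEY_SECRET", "")
    DEV_MODE = os.environ.get("DEV_MODE", "0") == "1"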

feat(extensions): initialize the database, Redis, and MinIO clients

- Create the global SQLAlchemy database instance and Redis client
- Configure a boto3-based S3 client that is compatible with MinIO (sketched after this list)
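An extensions module holding those three globals would follow the usual pattern; the endpoint and credentials below are placeholders that would really come from the config:

import boto3
import redis
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()
redis_client = redis.Redis.from_url("redis://localhost:6379/0")

# boto3 speaks the S3 protocol, so pointing it at a MinIO endpoint is enough.
s3_client = boto3.client(
    "s3",
    endpoint_url="http://localhost:9000",
    aws_access_key_id="minioadmin",
    aws_secret_access_key="minioadmin",
)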

chore(logs): add sample system log files

- Record log entries for user requests and for verification code send successes and failures
2026-01-12 00:53:31 +08:00


import pickle

from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
    datetime_to_utc_timestamp,
    maybe_ref,
    utc_timestamp_to_datetime,
)

try:
    from rethinkdb import RethinkDB
except ImportError as exc:  # pragma: nocover
    raise ImportError("RethinkDBJobStore requires rethinkdb installed") from exc


class RethinkDBJobStore(BaseJobStore):
    """
    Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to
    rethinkdb's `RethinkdbClient <http://www.rethinkdb.com/api/#connect>`_.

    Plugin alias: ``rethinkdb``

    :param str database: database to store jobs in
    :param str table: table to store jobs in
    :param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing
        connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(
        self,
        database="apscheduler",
        table="jobs",
        client=None,
        pickle_protocol=pickle.HIGHEST_PROTOCOL,
        **connect_args,
    ):
        super().__init__()

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not table:
            raise ValueError('The "table" parameter must not be empty')

        self.database = database
        self.table_name = table
        self.table = None
        self.client = client
        self.pickle_protocol = pickle_protocol
        self.connect_args = connect_args
        self.r = RethinkDB()
        self.conn = None

    def start(self, scheduler, alias):
        super().start(scheduler, alias)

        if self.client:
            self.conn = maybe_ref(self.client)
        else:
            self.conn = self.r.connect(db=self.database, **self.connect_args)

        # Create the database, table and secondary index on first start if missing.
        if self.database not in self.r.db_list().run(self.conn):
            self.r.db_create(self.database).run(self.conn)

        if self.table_name not in self.r.table_list().run(self.conn):
            self.r.table_create(self.table_name).run(self.conn)

        if "next_run_time" not in self.r.table(self.table_name).index_list().run(
            self.conn
        ):
            self.r.table(self.table_name).index_create("next_run_time").run(self.conn)

        self.table = self.r.db(self.database).table(self.table_name)

    def lookup_job(self, job_id):
        results = list(self.table.get_all(job_id).pluck("job_state").run(self.conn))
        return self._reconstitute_job(results[0]["job_state"]) if results else None

    def get_due_jobs(self, now):
        return self._get_jobs(
            self.r.row["next_run_time"] <= datetime_to_utc_timestamp(now)
        )

    def get_next_run_time(self):
        # The != None comparison deliberately builds a server-side ReQL filter.
        results = list(
            self.table.filter(self.r.row["next_run_time"] != None)  # noqa: E711
            .order_by(self.r.asc("next_run_time"))
            .map(lambda x: x["next_run_time"])
            .limit(1)
            .run(self.conn)
        )
        return utc_timestamp_to_datetime(results[0]) if results else None

    def get_all_jobs(self):
        jobs = self._get_jobs()
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        job_dict = {
            "id": job.id,
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": self.r.binary(
                pickle.dumps(job.__getstate__(), self.pickle_protocol)
            ),
        }
        results = self.table.insert(job_dict).run(self.conn)
        if results["errors"] > 0:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        changes = {
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": self.r.binary(
                pickle.dumps(job.__getstate__(), self.pickle_protocol)
            ),
        }
        results = self.table.get_all(job.id).update(changes).run(self.conn)
        # If every counter in the result document is zero, no document matched the id.
        anything_matched = any(results[key] != 0 for key in results)
        if results["skipped"] > 0 or results["errors"] > 0 or not anything_matched:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        results = self.table.get_all(job_id).delete().run(self.conn)
        if results["deleted"] + results["skipped"] != 1:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self.table.delete().run(self.conn)

    def shutdown(self):
        self.conn.close()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, predicate=None):
        jobs = []
        failed_job_ids = []
        query = (
            self.table.filter(self.r.row["next_run_time"] != None).filter(predicate)  # noqa: E711
            if predicate
            else self.table
        )
        query = query.order_by("next_run_time", "id").pluck("id", "job_state")

        for document in query.run(self.conn):
            try:
                jobs.append(self._reconstitute_job(document["job_state"]))
            except Exception:
                self._logger.exception(
                    'Unable to restore job "%s" -- removing it', document["id"]
                )
                failed_job_ids.append(document["id"])

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            self.r.expr(failed_job_ids).for_each(
                lambda job_id: self.table.get_all(job_id).delete()
            ).run(self.conn)

        return jobs

    def __repr__(self):
        connection = self.conn
        return f"<{self.__class__.__name__} (connection={connection})>"