ai_v/venv/Lib/site-packages/apscheduler/executors/base.py
af7c11d7f9 feat(api): Implement image generation and background synchronization
- Add an image generation endpoint supporting trial, credit, and custom API Key modes
- Upload generated images to MinIO storage asynchronously, with a retry mechanism
- Improve credit pre-deduction and refund-on-failure logic so user balances stay accurate (sketched below)
- Add an endpoint for fetching generation history, with time-range filtering and pagination
- Provide a local dictionary endpoint for models, aspect ratios, prompt templates, and sizes
- Implement a batch image upload endpoint backed by S3-compatible object storage
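
A minimal sketch of the pre-deduction/refund pattern the credit mode implies, assuming a Flask-SQLAlchemy session (db) and a User model with a credits column; every name below is hypothetical rather than taken from the codebase:

from contextlib import contextmanager

@contextmanager
def reserve_credits(db, user, cost):
    """Deduct credits up front and refund them if the wrapped block raises."""
    if user.credits < cost:
        raise ValueError("insufficient credits")
    user.credits -= cost
    db.session.commit()           # persist the pre-deduction before calling the AI API
    try:
        yield
    except Exception:
        user.credits += cost      # refund on any failure so the balance stays accurate
        db.session.commit()
        raise

# Typical use inside the generation endpoint (simplified):
#   with reserve_credits(db, current_user, cost=10):
#       result = call_image_api(prompt, size)   # hypothetical helper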

feat(admin): Add admin role management and permission assignment endpoints

- Implement role listing, creation, update, and deletion
- Add a permission list query endpoint
- Implement a user-role assignment endpoint so user permissions can be managed centrally
- Add CRUD endpoints for system dictionaries, with category filtering and sorting
- Apply permission checks across all admin endpoints to guarantee secure access (see the sketch below)
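
A hedged sketch of how such a permission check can wrap an admin endpoint; the decorator, blueprint, and Role model are assumptions, not the project's real identifiers:

from functools import wraps
from flask import Blueprint, abort, g, jsonify

admin_bp = Blueprint("admin", __name__, url_prefix="/admin")

def require_permission(code):
    """Reject the request unless the current user holds the given permission code."""
    def decorator(view):
        @wraps(view)
        def wrapper(*args, **kwargs):
            if code not in getattr(g, "permissions", set()):
                abort(403)
            return view(*args, **kwargs)
        return wrapper
    return decorator

@admin_bp.get("/roles")
@require_permission("role:list")
def list_roles():
    roles = Role.query.order_by(Role.id).all()    # Role is an assumed SQLAlchemy model
    return jsonify([{"id": r.id, "name": r.name} for r in roles])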

feat(auth): Flesh out user login, registration, and permission-related endpoints and pages

- Implement sending and verifying phone verification codes to secure registration (see the Redis-backed sketch below)
- Support phone-number registration, login, and logout endpoints, with integrated logging
- Add a change-password feature that verifies the current password before updating
- Provide a dynamic navigation menu endpoint that renders menus according to permissions
- Implement admin UI routes and access control for the log, role, and dictionary management pages
- Add a system log query endpoint with keyword and level filtering
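
A minimal sketch of the verification-code flow, assuming codes are stored in Redis keyed by phone number; the key format and TTL are placeholders:

import random

CODE_TTL_SECONDS = 300

def send_code(redis_client, phone: str) -> str:
    """Generate a six-digit code and cache it briefly in Redis before sending the SMS."""
    code = f"{random.randint(0, 999999):06d}"
    redis_client.setex(f"sms:code:{phone}", CODE_TTL_SECONDS, code)
    # Actual delivery through the Aliyun SMS SDK is omitted here.
    return code

def verify_code(redis_client, phone: str, submitted: str) -> bool:
    """Compare the submitted code against the cached one; consume it on success."""
    stored = redis_client.get(f"sms:code:{phone}")
    if stored is None:
        return False
    if isinstance(stored, bytes):
        stored = stored.decode()
    if stored != submitted:
        return False
    redis_client.delete(f"sms:code:{phone}")      # one-time use
    return True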

feat(app): Initialize the Flask application and wire up blueprints and the database

- Create an application factory that loads configuration and initializes the database and Redis client (sketched below)
- Register the auth, API, and admin blueprints to consolidate routing
- The root route renders the home page template
- Create database tables automatically inside the application context so the runtime environment is ready
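
A hedged sketch of the application-factory pattern described above; the blueprint and module names are assumptions about the project layout, not its real paths:

from flask import Flask

def create_app(config_object="config.Config"):
    app = Flask(__name__)
    app.config.from_object(config_object)

    # Bind the lazily created extensions (see feat(extensions) below).
    from extensions import db
    db.init_app(app)

    # Register the feature blueprints to consolidate routing.
    from auth.routes import auth_bp
    from api.routes import api_bp
    from admin.routes import admin_bp
    app.register_blueprint(auth_bp)
    app.register_blueprint(api_bp)
    app.register_blueprint(admin_bp)

    # Create the tables inside the application context so a fresh environment just works.
    with app.app_context():
        db.create_all()

    return app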

feat(database): Provide database creation and migration support scripts

- Add a database creation script that detects whether the database already exists
- Add a table initialization script that can create and drop all tables
- Implement RBAC initialization, creating the base permissions and roles (sketched below)
- Add a manual schema-fix script that adds the user API Key and credit columns
- Forced migration script cleans up connections, repairs table structure, and seeds default data and role assignments
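
A hedged sketch of the RBAC seeding step: create the base permissions and an admin role, then link them. The model names, relationship, and permission codes are assumptions:

BASE_PERMISSIONS = ["role:list", "role:edit", "dict:edit", "log:view"]

def init_rbac(db, Role, Permission):
    """Create missing base permissions and grant them all to an 'admin' role."""
    perms = []
    for code in BASE_PERMISSIONS:
        perm = Permission.query.filter_by(code=code).first()
        if perm is None:
            perm = Permission(code=code)
            db.session.add(perm)
        perms.append(perm)

    admin = Role.query.filter_by(name="admin").first()
    if admin is None:
        admin = Role(name="admin")
        db.session.add(admin)
    admin.permissions = perms     # assumed many-to-many relationship

    db.session.commit()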

feat(config): Add system configuration parameters

- Configure database, Redis, session, and MinIO settings (see the sketch below)
- Add the AI API base URL and trial key configuration
- Integrate Aliyun SMS service settings and development-mode parameters
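
A hedged sketch of a config module covering the parameters listed above; every variable name and default value is a placeholder, not the real configuration:

import os

class Config:
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL", "mysql+pymysql://user:pass@localhost/app")
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0")
    SECRET_KEY = os.environ.get("SECRET_KEY", "change-me")       # signs the session cookie

    MINIO_ENDPOINT = os.environ.get("MINIO_ENDPOINT", "http://localhost:9000")
    MINIO_ACCESS_KEY = os.environ.get("MINIO_ACCESS_KEY", "")
    MINIO_SECRET_KEY = os.environ.get("MINIO_SECRET_KEY", "")
    MINIO_BUCKET = os.environ.get("MINIO_BUCKET", "images")

    AI_API_BASE_URL = os.environ.get("AI_API_BASE_URL", "")      # image-generation endpoint
    AI_TRIAL_API_KEY = os.environ.get("AI_TRIAL_API_KEY", "")    # shared trial key

    ALIYUN_SMS_ACCESS_KEY_ID = os.environ.get("ALIYUN_SMS_ACCESS_KEY_ID", "")
    ALIYUN_SMS_ACCESS_KEY_SECRET = os.environ.get("ALIYUN_SMS_ACCESS_KEY_SECRET", "")

    DEV_MODE = os.environ.get("DEV_MODE", "false").lower() == "true"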

feat(extensions): Initialize the database, Redis, and MinIO clients

- Create the global SQLAlchemy database instance and Redis client
- Configure a boto3-based S3 client compatible with MinIO (sketched below)
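
A hedged sketch of such an extensions module; the endpoint and credentials are placeholders and would normally come from the configuration above:

import boto3
import redis
from flask_sqlalchemy import SQLAlchemy

# Global database handle, bound to the app later via db.init_app(app).
db = SQLAlchemy()

# Shared Redis client for sessions, verification codes, and caching.
redis_client = redis.Redis.from_url("redis://localhost:6379/0", decode_responses=True)

# MinIO exposes the S3 API, so a plain boto3 S3 client pointed at its endpoint works.
s3_client = boto3.client(
    "s3",
    endpoint_url="http://localhost:9000",
    aws_access_key_id="minio-access-key",
    aws_secret_access_key="minio-secret-key",
)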

chore(logs): Add a sample system log file

- Records user requests plus successful and failed verification-code sends
2026-01-12 00:53:31 +08:00

206 lines
6.9 KiB
Python

import logging
import sys
import traceback
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from traceback import format_tb

from apscheduler.events import (
    EVENT_JOB_ERROR,
    EVENT_JOB_EXECUTED,
    EVENT_JOB_MISSED,
    JobExecutionEvent,
)


class MaxInstancesReachedError(Exception):
    def __init__(self, job):
        super().__init__(
            f'Job "{job.id}" has already reached its maximum number of instances '
            f"({job.max_instances})"
        )


class BaseExecutor(metaclass=ABCMeta):
    """Abstract base class that defines the interface that every executor must implement."""

    _scheduler = None
    _lock = None
    _logger = logging.getLogger("apscheduler.executors")

    def __init__(self):
        super().__init__()
        self._instances = defaultdict(lambda: 0)

    def start(self, scheduler, alias):
        """
        Called by the scheduler when the scheduler is being started or when the executor is being
        added to an already running scheduler.

        :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
            this executor
        :param str|unicode alias: alias of this executor as it was assigned to the scheduler

        """
        self._scheduler = scheduler
        self._lock = scheduler._create_lock()
        self._logger = logging.getLogger(f"apscheduler.executors.{alias}")

    def shutdown(self, wait=True):
        """
        Shuts down this executor.

        :param bool wait: ``True`` to wait until all submitted jobs
            have been executed
        """

    def submit_job(self, job, run_times):
        """
        Submits job for execution.

        :param Job job: job to execute
        :param list[datetime] run_times: list of datetimes specifying
            when the job should have been run
        :raises MaxInstancesReachedError: if the maximum number of
            allowed instances for this job has been reached

        """
        assert self._lock is not None, "This executor has not been started yet"
        with self._lock:
            if self._instances[job.id] >= job.max_instances:
                raise MaxInstancesReachedError(job)

            self._do_submit_job(job, run_times)
            self._instances[job.id] += 1

    @abstractmethod
    def _do_submit_job(self, job, run_times):
        """Performs the actual task of scheduling `run_job` to be called."""

    def _run_job_success(self, job_id, events):
        """
        Called by the executor with the list of generated events when :func:`run_job` has been
        successfully called.

        """
        with self._lock:
            self._instances[job_id] -= 1
            if self._instances[job_id] == 0:
                del self._instances[job_id]

        for event in events:
            self._scheduler._dispatch_event(event)

    def _run_job_error(self, job_id, exc, traceback=None):
        """Called by the executor with the exception if there is an error calling `run_job`."""
        with self._lock:
            self._instances[job_id] -= 1
            if self._instances[job_id] == 0:
                del self._instances[job_id]

        exc_info = (exc.__class__, exc, traceback)
        self._logger.error("Error running job %s", job_id, exc_info=exc_info)


def run_job(job, jobstore_alias, run_times, logger_name):
    """
    Called by executors to run the job. Returns a list of scheduler events to be dispatched by the
    scheduler.

    """
    events = []
    logger = logging.getLogger(logger_name)
    for run_time in run_times:
        # See if the job missed its run time window, and handle
        # possible misfires accordingly
        if job.misfire_grace_time is not None:
            difference = datetime.now(timezone.utc) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                events.append(
                    JobExecutionEvent(
                        EVENT_JOB_MISSED, job.id, jobstore_alias, run_time
                    )
                )
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
                continue

        logger.info('Running job "%s" (scheduled at %s)', job, run_time)
        try:
            retval = job.func(*job.args, **job.kwargs)
        except BaseException:
            exc, tb = sys.exc_info()[1:]
            formatted_tb = "".join(format_tb(tb))
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_ERROR,
                    job.id,
                    jobstore_alias,
                    run_time,
                    exception=exc,
                    traceback=formatted_tb,
                )
            )
            logger.exception('Job "%s" raised an exception', job)

            # This is to prevent cyclic references that would lead to memory leaks
            traceback.clear_frames(tb)
            del tb
        else:
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval
                )
            )
            logger.info('Job "%s" executed successfully', job)

    return events


async def run_coroutine_job(job, jobstore_alias, run_times, logger_name):
    """Coroutine version of run_job()."""
    events = []
    logger = logging.getLogger(logger_name)
    for run_time in run_times:
        # See if the job missed its run time window, and handle possible misfires accordingly
        if job.misfire_grace_time is not None:
            difference = datetime.now(timezone.utc) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                events.append(
                    JobExecutionEvent(
                        EVENT_JOB_MISSED, job.id, jobstore_alias, run_time
                    )
                )
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
                continue

        logger.info('Running job "%s" (scheduled at %s)', job, run_time)
        try:
            retval = await job.func(*job.args, **job.kwargs)
        except BaseException:
            exc, tb = sys.exc_info()[1:]
            formatted_tb = "".join(format_tb(tb))
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_ERROR,
                    job.id,
                    jobstore_alias,
                    run_time,
                    exception=exc,
                    traceback=formatted_tb,
                )
            )
            logger.exception('Job "%s" raised an exception', job)
            traceback.clear_frames(tb)
        else:
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval
                )
            )
            logger.info('Job "%s" executed successfully', job)

    return events
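
For context, a minimal concrete executor (not part of this file, and modelled loosely on APScheduler's own DebugExecutor) simply runs the job inline via run_job and reports the outcome back through the hooks defined above:

import sys

from apscheduler.executors.base import BaseExecutor, run_job


class InlineExecutor(BaseExecutor):
    """Hypothetical executor that runs jobs synchronously in the calling thread."""

    def _do_submit_job(self, job, run_times):
        try:
            events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
        except BaseException:
            self._run_job_error(job.id, *sys.exc_info()[1:])
        else:
            self._run_job_success(job.id, events)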