Mirror of https://github.com/jumpserver/jumpserver.git (synced 2025-12-24 21:12:35 +00:00)

Compare commits (11 commits)
| SHA1 |
|---|
| 0f84737c88 |
| ee9687743e |
| d5a9942159 |
| 3b92e9e516 |
| 0132eafeb6 |
| fb286f4665 |
| daeba109fd |
| 06411b080e |
| 5da6a1b9b6 |
| 988f33634e |
| a645dc09ae |
.github/workflows/release-drafter.yml (vendored, 2 changed lines)
```diff
@@ -21,7 +21,7 @@ jobs:
          TAG=$(basename ${GITHUB_REF})
          VERSION=${TAG/v/}
          wget https://raw.githubusercontent.com/jumpserver/installer/master/quick_start.sh
-         sed -i "s@Version=.*@Version=v${VERSION}@g" quick_start.sh
+         sed -i "s@VERSION=dev@VERSION=v${VERSION}@g" quick_start.sh
          echo "::set-output name=TAG::$TAG"
          echo "::set-output name=VERSION::$VERSION"
      - name: Create Release
```
```diff
@@ -1,4 +1,4 @@
-FROM python:3.8-slim as stage-build
+FROM python:3.8-slim-bullseye as stage-build
 ARG TARGETARCH

 ARG VERSION
@@ -8,7 +8,7 @@ WORKDIR /opt/jumpserver
 ADD . .
 RUN cd utils && bash -ixeu build.sh

-FROM python:3.8-slim
+FROM python:3.8-slim-bullseye
 ARG TARGETARCH
 MAINTAINER JumpServer Team <ibuler@qq.com>

@@ -87,8 +87,10 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     && pip config set global.index-url ${PIP_MIRROR} \
     && pip install --upgrade pip \
     && pip install --upgrade setuptools wheel \
+    && pip install Cython==0.29.35 \
+    && pip install --no-build-isolation pymssql \
     && pip install $(grep -E 'jms|jumpserver' requirements/requirements.txt) -i ${PIP_JMS_MIRROR} \
-    && pip install -r requirements/requirements.txt
+    && pip install -r requirements/requirements.txt --use-deprecated=legacy-resolver

 COPY --from=stage-build /opt/jumpserver/release/jumpserver /opt/jumpserver
 RUN echo > /opt/jumpserver/config.yml \
```
GITSHA (2 changed lines)
```diff
@@ -1 +1 @@
-4e4e58480fc061c2e487c64f0f70667e22eaef27
+ee9687743eb5a062b1b9d4a607c149d2bd73c58a
```
```diff
@@ -125,6 +125,7 @@ class ConnectionTokenMixin:
             'bookmarktype:i': '3',
             'use redirection server name:i': '0',
             'smart sizing:i': '1',
+            'disableconnectionsharing:i': '1',
             # 'drivestoredirect:s': '*',
             # 'domain:s': ''
             # 'alternate shell:s:': '||MySQLWorkbench',
```
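For context, the keys in this mapping follow the `option name:type` convention used by .rdp files, so each entry renders as a `name:type:value` line when the connection file is built. A minimal, hypothetical sketch of that rendering (the dict below is a trimmed stand-in, not the real option set on ConnectionTokenMixin):

```python
# Hypothetical subset of RDP options, only to show how "key:type" dict keys
# and their string values combine into .rdp "name:type:value" lines.
rdp_options = {
    'bookmarktype:i': '3',
    'smart sizing:i': '1',
    'disableconnectionsharing:i': '1',
}

rdp_content = '\n'.join(f'{key}:{value}' for key, value in rdp_options.items())
print(rdp_content)
# bookmarktype:i:3
# smart sizing:i:1
# disableconnectionsharing:i:1
```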
apps/ops/celery/beat/__init__.py (new file, 0 lines)

apps/ops/celery/beat/schedulers.py (new file, 80 lines)
```diff
@@ -0,0 +1,80 @@
+import logging
+
+from celery.utils.log import get_logger
+from django.db import close_old_connections
+from django.core.exceptions import ObjectDoesNotExist
+from django.db.utils import DatabaseError, InterfaceError
+
+from django_celery_beat.schedulers import DatabaseScheduler as DJDatabaseScheduler
+
+logger = get_logger(__name__)
+debug, info, warning = logger.debug, logger.info, logger.warning
+
+
+__all__ = ['DatabaseScheduler']
+
+
+class DatabaseScheduler(DJDatabaseScheduler):
+
+    def sync(self):
+        if logger.isEnabledFor(logging.DEBUG):
+            debug('Writing entries...')
+        _tried = set()
+        _failed = set()
+        try:
+            close_old_connections()
+
+            while self._dirty:
+                name = self._dirty.pop()
+                try:
+                    # Original code:
+                    # self.schedule[name].save()
+                    # _tried.add(name)
+
+                    """
+                    ::Debug Description (2023.07.10)::
+
+                    Calling self.schedule here may re-fetch the entry from the database
+                    before self.save() runs, instead of using the temporarily set
+                    last_run_at.
+
+                    If self.schedule is called here,
+                    the self.schedule[name] entry being saved may carry a stale
+                    last_run_at read back from the database,
+                    rather than the last_run_at set temporarily after the task ran
+                    (it is set in __next__()).
+                    After the `max_interval` passes, the next scheduling cycle will
+                    run the task again.
+
+                    ::Demo::
+                    Task info:
+                        beat config: max_interval = 60s
+
+                        Task name: cap
+                        Task schedule: runs every 3 minutes
+                        Task last run time: 18:00
+
+                    First run: 18:03 (last_run_at = 18:03 is set at execution time, in memory only)
+
+                    After the task finishes,
+                        beat detects that a sync is needed; sync calls self.schedule,
+                        self.schedule finds schedule_changed() is True and calls all_as_schedule(),
+                        so the self.schedule[name] used in sync has last_run_at = 18:00,
+                        and that is what self.save() persists inside self.sync().
+
+                    beat: Waking up 60s ...
+
+                    Second run: 18:04 (because the last_run_at read back is 18:00, entry.is_due() == True)
+
+                    ::Fix::
+                    So, to avoid re-reading from the database, use _schedule directly here.
+                    """
+                    self._schedule[name].save()
+                    _tried.add(name)
+                except (KeyError, TypeError, ObjectDoesNotExist):
+                    _failed.add(name)
+        except DatabaseError as exc:
+            logger.exception('Database error while sync: %r', exc)
+        except InterfaceError:
+            warning(
+                'DatabaseScheduler: InterfaceError in sync(), '
+                'waiting to retry in next call...'
+            )
+        finally:
+            # retry later, only for the failed ones
+            self._dirty |= _failed
```
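The override above only takes effect once celery beat is told to load it. A minimal sketch of selecting it through Celery configuration follows; the `'ops'` application name is an assumption, and the branch's actual wiring is the `scheduler` string change in a later hunk of this compare:

```python
# Minimal sketch, not the project's actual startup code: selecting the
# patched DatabaseScheduler for celery beat via configuration. The 'ops'
# app name is an assumption; only the dotted scheduler path comes from
# this compare view.
from celery import Celery

app = Celery('ops')

# Equivalent to: celery -A ops beat --scheduler ops.celery.beat.schedulers:DatabaseScheduler
app.conf.beat_scheduler = 'ops.celery.beat.schedulers:DatabaseScheduler'
```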
```diff
@@ -149,7 +149,7 @@ class BuiltinRole:
             'User': cls.system_user.get_role(),
             'Auditor': cls.system_auditor.get_role()
         }
-        return cls.system_role_mapper[name]
+        return cls.system_role_mapper.get(name, cls.system_role_mapper['User'])

     @classmethod
     def get_org_role_by_old_name(cls, name):
@@ -159,7 +159,7 @@ class BuiltinRole:
             'User': cls.org_user.get_role(),
             'Auditor': cls.org_auditor.get_role(),
         }
-        return cls.org_role_mapper[name]
+        return cls.org_role_mapper.get(name, cls.org_role_mapper['User'])

     @classmethod
     def sync_to_db(cls, show_msg=False):
```
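Both hunks above replace a direct dictionary lookup with `dict.get()` and the built-in `User` role as the default, so an unrecognized legacy role name falls back to `User` instead of raising `KeyError`. A small, self-contained illustration with hypothetical mapper values (the real mappers hold Role instances):

```python
# Hypothetical stand-in for cls.system_role_mapper / cls.org_role_mapper,
# only to illustrate the lookup change.
role_mapper = {'Admin': 'admin-role', 'User': 'user-role', 'Auditor': 'auditor-role'}

try:
    role = role_mapper['Operator']                        # old behavior: unknown name raises KeyError
except KeyError:
    role = None

role = role_mapper.get('Operator', role_mapper['User'])   # new behavior: falls back to the User role
print(role)  # 'user-role'
```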
```diff
@@ -1,5 +1,5 @@
-from django.db.transaction import atomic
 from django.db.models import Model
+from django.db.transaction import atomic
 from django.utils.translation import ugettext as _
 from rest_framework import serializers

@@ -69,8 +69,6 @@ class BaseApplyAssetApplicationSerializer(serializers.Serializer):
             error = _('The expiration date should be greater than the start date')
             raise serializers.ValidationError({'apply_date_expired': error})

-        attrs['apply_date_start'] = apply_date_start
-        attrs['apply_date_expired'] = apply_date_expired
         return attrs

     @atomic
```
```diff
@@ -81,7 +81,6 @@ def check_user_expired_periodic():


 @shared_task
-@transaction.atomic
 def import_ldap_user():
     logger.info("Start import ldap user task")
     util_server = LDAPServerUtil()
```
```diff
@@ -128,9 +128,9 @@ kubernetes==21.7.0
 # DB requirements
 mysqlclient==2.1.0
 PyMySQL==1.0.2
-oracledb==1.0.1
+oracledb==1.2.2
 psycopg2-binary==2.9.1
-pymssql==2.1.5
+# pymssql==2.2.7
 django-mysql==3.9.0
 django-redis==5.2.0
 python-redis-lock==3.7.0
```
```diff
@@ -54,7 +54,7 @@ else:
     connection_params['port'] = settings.REDIS_PORT
 redis_client = Redis(**connection_params)

-scheduler = "django_celery_beat.schedulers:DatabaseScheduler"
+scheduler = "ops.celery.beat.schedulers:DatabaseScheduler"
 processes = []
 cmd = [
     'celery',
```
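The hunk above switches the beat `scheduler` string from the stock `django_celery_beat` class to the patched one added in `apps/ops/celery/beat/schedulers.py`. The rest of the `cmd` list is truncated in this view; the sketch below shows the usual way such a scheduler path is passed to `celery beat`, with the `-A ops` application argument as an assumption:

```python
# Hedged sketch only: the real cmd list is truncated in the hunk above, and
# the '-A ops' application argument is an assumption. It shows the usual way
# a scheduler class path is handed to `celery beat`.
import subprocess

scheduler = "ops.celery.beat.schedulers:DatabaseScheduler"
cmd = [
    'celery',
    '-A', 'ops',                 # Celery application; assumed name
    'beat',
    '--scheduler', scheduler,    # use the patched DatabaseScheduler
    '--max-interval', '60',      # matches the max_interval cited in the schedulers.py docstring
]
subprocess.run(cmd)
```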