Mirror of https://github.com/jumpserver/jumpserver.git (synced 2025-09-04 17:01:09 +00:00)
feat(excel): Add Excel import/export (#5124)
* refactor(drf_renderer): add ExcelRenderer to support exporting the Excel file format; optimize CSVRenderer; abstract a BaseRenderer
* perf(renderer): support exporting resource details
* refactor(drf_parser): add ExcelParser to support importing the Excel file format; optimize CSVParser; abstract a BaseParser
* refactor(drf_parser): add ExcelParser to support importing the Excel file format; optimize CSVParser; abstract a BaseParser 2
* perf(renderer): catch exceptions during renderer processing
* perf: add the Excel dependency packages
* perf(drf): improve import/export error logging
* perf: add dependency pyexcel-io==0.6.4
* perf: add dependency pyexcel-xlsx==0.6.0
* feat: rename variables in drf/renderer & parser
* feat: fix a bug in drf/renderer
* feat: rename variables in drf/renderer & parser

Co-authored-by: Bai <bugatti_it@163.com>
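The diff below shows only the CSV side of this refactor. Based on the commit message, the Excel counterpart is built on the pyexcel-xlsx dependency added here; the following is a minimal sketch of what such a parser could look like under the same BaseFileParser contract. The class name, media type, and sheet handling are assumptions, not the commit's exact code.

import io

from pyexcel_xlsx import get_data

from .base import BaseFileParser


class ExcelFileParser(BaseFileParser):
    # Media type is an assumption; the commit may register a different one.
    media_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'

    def generate_rows(self, stream_data):
        # pyexcel-xlsx returns {sheet_name: [[cell, ...], ...]}
        book = get_data(io.BytesIO(stream_data), file_type='xlsx')
        sheet = next(iter(book.values()))  # assume the first sheet holds the data
        for row in sheet:
            if not any(row):  # skip empty rows, mirroring CSVFileParser
                continue
            yield row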
@@ -1,32 +1,13 @@
 # ~*~ coding: utf-8 ~*~
 #
 
-import json
 import chardet
-import codecs
 import unicodecsv
 
-from django.utils.translation import ugettext as _
-from rest_framework.parsers import BaseParser
-from rest_framework.exceptions import ParseError, APIException
-from rest_framework import status
-
-from common.utils import get_logger
-
-
-logger = get_logger(__file__)
+from .base import BaseFileParser
 
 
-class CsvDataTooBig(APIException):
-    status_code = status.HTTP_400_BAD_REQUEST
-    default_code = 'csv_data_too_big'
-    default_detail = _('The max size of CSV is %d bytes')
-
-
-class JMSCSVParser(BaseParser):
-    """
-    Parses CSV file to serializer data
-    """
-    CSV_UPLOAD_MAX_SIZE = 1024 * 1024 * 10
+class CSVFileParser(BaseFileParser):
 
     media_type = 'text/csv'
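The second hunk, below, strips the bespoke parsing machinery out of the CSV parser. For context, a hypothetical illustration (not from this commit) of how a view would wire the parser in: DRF matches the request's Content-Type against each parser's media_type and dispatches to the one that fits.

from rest_framework.views import APIView
from rest_framework.response import Response


class UserImportView(APIView):  # illustrative view name
    parser_classes = [CSVFileParser]  # an Excel parser could be listed here too

    def post(self, request):
        # request.data is the list of row dicts produced by parse()
        return Response({'imported': len(request.data)})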
@@ -38,99 +19,10 @@ class JMSCSVParser(BaseParser):
         for line in stream.splitlines():
             yield line
 
-    @staticmethod
-    def _gen_rows(csv_data, charset='utf-8', **kwargs):
-        csv_reader = unicodecsv.reader(csv_data, encoding=charset, **kwargs)
+    def generate_rows(self, stream_data):
+        detect_result = chardet.detect(stream_data)
+        encoding = detect_result.get("encoding", "utf-8")
+        lines = self._universal_newlines(stream_data)
+        csv_reader = unicodecsv.reader(lines, encoding=encoding)
         for row in csv_reader:
             if not any(row):  # empty row
                 continue
             yield row
-
-    @staticmethod
-    def _get_fields_map(serializer_cls):
-        fields_map = {}
-        fields = serializer_cls().fields
-        fields_map.update({v.label: k for k, v in fields.items()})
-        fields_map.update({k: k for k, _ in fields.items()})
-        return fields_map
-
-    @staticmethod
-    def _replace_chinese_quot(str_):
-        trans_table = str.maketrans({
-            '“': '"',
-            '”': '"',
-            '‘': '"',
-            '’': '"',
-            '\'': '"'
-        })
-        return str_.translate(trans_table)
-
-    @classmethod
-    def _process_row(cls, row):
-        """
-        Per-row processing before building the JSON data
-        """
-        _row = []
-
-        for col in row:
-            # list conversion
-            if isinstance(col, str) and col.startswith('[') and col.endswith(']'):
-                col = cls._replace_chinese_quot(col)
-                col = json.loads(col)
-            # dict conversion
-            if isinstance(col, str) and col.startswith("{") and col.endswith("}"):
-                col = cls._replace_chinese_quot(col)
-                col = json.loads(col)
-            _row.append(col)
-        return _row
-
-    @staticmethod
-    def _process_row_data(row_data):
-        """
-        Row-data processing after the JSON data is built
-        """
-        _row_data = {}
-        for k, v in row_data.items():
-            if isinstance(v, list) or isinstance(v, dict)\
-                    or isinstance(v, str) and k.strip() and v.strip():
-                _row_data[k] = v
-        return _row_data
-
-    def parse(self, stream, media_type=None, parser_context=None):
-        parser_context = parser_context or {}
-        try:
-            view = parser_context['view']
-            meta = view.request.META
-            serializer_cls = view.get_serializer_class()
-        except Exception as e:
-            logger.debug(e, exc_info=True)
-            raise ParseError('The resource does not support imports!')
-
-        content_length = int(meta.get('CONTENT_LENGTH', meta.get('HTTP_CONTENT_LENGTH', 0)))
-        if content_length > self.CSV_UPLOAD_MAX_SIZE:
-            msg = CsvDataTooBig.default_detail % self.CSV_UPLOAD_MAX_SIZE
-            logger.error(msg)
-            raise CsvDataTooBig(msg)
-
-        try:
-            stream_data = stream.read()
-            stream_data = stream_data.strip(codecs.BOM_UTF8)
-            detect_result = chardet.detect(stream_data)
-            encoding = detect_result.get("encoding", "utf-8")
-            binary = self._universal_newlines(stream_data)
-            rows = self._gen_rows(binary, charset=encoding)
-
-            header = next(rows)
-            fields_map = self._get_fields_map(serializer_cls)
-            header = [fields_map.get(name.strip('*'), '') for name in header]
-
-            data = []
-            for row in rows:
-                row = self._process_row(row)
-                row_data = dict(zip(header, row))
-                row_data = self._process_row_data(row_data)
-                data.append(row_data)
-            return data
-        except Exception as e:
-            logger.error(e, exc_info=True)
-            raise ParseError('CSV parse error!')
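The parse() machinery removed above does not disappear: per the commit message it was abstracted into BaseFileParser, with each format-specific subclass supplying only generate_rows(). A rough reconstruction of that base class from the removed logic; names and details are assumptions and may differ from the actual base.py in this commit.

import abc

from rest_framework.parsers import BaseParser
from rest_framework.exceptions import ParseError


class BaseFileParser(BaseParser, metaclass=abc.ABCMeta):

    @abc.abstractmethod
    def generate_rows(self, stream_data):
        """Subclasses (CSVFileParser, ExcelFileParser) yield raw rows here."""

    @staticmethod
    def get_fields_map(serializer_cls):
        # Map both field labels and field names to field names, so the header
        # row may use either form (taken from the removed _get_fields_map above).
        fields = serializer_cls().fields
        fields_map = {v.label: k for k, v in fields.items()}
        fields_map.update({k: k for k in fields})
        return fields_map

    def parse(self, stream, media_type=None, parser_context=None):
        parser_context = parser_context or {}
        try:
            view = parser_context['view']
            serializer_cls = view.get_serializer_class()
        except Exception:
            raise ParseError('The resource does not support imports!')

        rows = self.generate_rows(stream.read())
        header = next(rows)
        fields_map = self.get_fields_map(serializer_cls)
        header = [fields_map.get(name.strip('*'), '') for name in header]

        data = []
        for row in rows:
            # Keep only cells whose column mapped to a known field.
            row_data = {k: v for k, v in zip(header, row) if k}
            data.append(row_data)
        return data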