Merge 6f61be451f into 46ed5d856b
commit fdfc28e21a
@@ -8,21 +8,21 @@ repos:
         args: ['--in-place', '--remove-unused-variables', '--remove-all-unused-imports', '--ignore-init-module-imports']

   - repo: https://github.com/pycqa/isort
-    rev: 5.13.2
+    rev: 6.0.1
     hooks:
       - id: isort
         name: sort all imports (python)
         args: ["--profile", "black"] # avoid conflict with black

   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 24.10.0
+    rev: 25.1.0
     hooks:
       - id: black
         name: black formatter
         args: ['--line-length=120', '--target-version=py37', '--target-version=py38', '--target-version=py39','--target-version=py310']

   - repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: v19.1.5
+    rev: v20.1.0
     hooks:
       - id: clang-format
         name: clang formatter
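The three hook bumps above (isort 5.13.2 → 6.0.1, black 24.10.0 → 25.1.0, clang-format v19.1.5 → v20.1.0) only change formatter versions; the remaining hunks in this commit look like the mechanical reformatting (docstring whitespace, escape-sequence casing) that the newer releases apply. As a rough local sanity check, illustrative only and not part of this commit, the pinned black release can also be exercised through its Python API with the same 120-character line length as the hook args:

    # Illustrative sketch: confirm the installed black version and preview its
    # output at the line length configured in the pre-commit hook args.
    import black

    print(black.__version__)  # expected to report 25.1.0 after this bump

    src = "def f( x ):\n    return  x\n"
    print(black.format_str(src, mode=black.Mode(line_length=120)))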
@@ -81,11 +81,11 @@ with gr.Blocks(css=CSS) as demo:
         )
     with gr.Row():
         btn = gr.UploadButton("📁", file_types=["file"], file_count="multiple", size="sm")
-        restart_btn = gr.Button(str("\u21BB"), elem_id="restart-btn", scale=1)
+        restart_btn = gr.Button(str("\u21bb"), elem_id="restart-btn", scale=1)
         txt = gr.Textbox(
             scale=8,
             show_label=False,
-            placeholder="Enter text and press enter, or use 📁 to upload files, click \u21BB to clear loaded files and restart chat",
+            placeholder="Enter text and press enter, or use 📁 to upload files, click \u21bb to clear loaded files and restart chat",
             container=True,
             autofocus=True,
         )
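The only difference in this hunk is the casing of the hex digits in the escape sequences; Python treats "\u21BB" and "\u21bb" as the same character (U+21BB, the ↻ restart symbol), so the button label and placeholder text are unchanged. A minimal check, purely illustrative:

    # Both spellings denote U+21BB (clockwise open circle arrow); only the
    # source-code casing of the escape differs.
    assert "\u21BB" == "\u21bb" == "↻"
    print("\u21bb")  # prints the same restart arrow either way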
@@ -1,6 +1,6 @@
 """This code is adapted from Alpa
-https://github.com/alpa-projects/alpa/
-with some changes. """
+https://github.com/alpa-projects/alpa/
+with some changes."""

 import multiprocessing
 import time
@@ -1,6 +1,6 @@
 """This code is adapted from Alpa
-https://github.com/alpa-projects/alpa/
-with some changes. """
+https://github.com/alpa-projects/alpa/
+with some changes."""

 import operator
 from dataclasses import dataclass
@@ -17,7 +17,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" PyTorch OpenMoE model."""
+"""PyTorch OpenMoE model."""
 import math
 from typing import List, Optional, Tuple, Union

@@ -1,6 +1,6 @@
 """This code is from NVIDIA apex:
-https://github.com/NVIDIA/apex
-with some changes. """
+https://github.com/NVIDIA/apex
+with some changes."""

 import numbers

@@ -1,4 +1,4 @@
-""" adapted from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/adamw8bit.py"""
+"""adapted from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/adamw8bit.py"""

 import warnings
 from collections import defaultdict
@@ -1,4 +1,4 @@
-""" adapted from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/adamw8bit.py"""
+"""adapted from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/adamw8bit.py"""

 import warnings
 from typing import List
@@ -1,4 +1,4 @@
-""" PyTorch ChatGLM model. """
+"""PyTorch ChatGLM model."""

 from typing import List, Optional, Tuple

@@ -34,8 +34,8 @@ class PreTrainingDataset:
         self.do_whole_word_mask = do_whole_word_mask
         self.max_predictions_per_seq = max_predictions_per_seq
         self.vocab_words = list(tokenizer.vocab.keys())
-        self.rec = re.compile("[\u4E00-\u9FA5]")
-        self.whole_rec = re.compile("##[\u4E00-\u9FA5]")
+        self.rec = re.compile("[\u4e00-\u9fa5]")
+        self.whole_rec = re.compile("##[\u4e00-\u9fa5]")

         self.mlm_p = 0.15
         self.mlm_mask_p = 0.8
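As in the earlier hunks, only the casing of the escape sequences changes; "[\u4e00-\u9fa5]" still denotes the same range of CJK Unified Ideographs used to detect Chinese characters for whole-word masking. A small illustrative check, not part of the diff:

    import re

    # Same character class as self.rec above: \u4e00-\u9fa5 spans the commonly
    # used CJK Unified Ideographs, so Chinese text matches and plain ASCII does not.
    rec = re.compile("[\u4e00-\u9fa5]")
    assert rec.search("模型") is not None   # Chinese characters match
    assert rec.search("hello") is None      # ASCII-only text does not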
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" PyTorch DeBERTa-v2 model."""
+"""PyTorch DeBERTa-v2 model."""

 import math
 from collections.abc import Sequence