mirror of
https://github.com/hpcaitech/ColossalAI.git
synced 2026-04-26 09:42:27 +00:00
* add langchain * add langchain * Add files via upload * add langchain * fix style * fix style: remove extra space * add pytest; modified retriever * add pytest; modified retriever * add tests to build_on_pr.yml * fix build_on_pr.yml * fix build on pr; fix environ vars * seperate unit tests for colossalqa from build from pr * fix container setting; fix environ vars * commented dev code * add incremental update * remove stale code * fix style * change to sha3 224 * fix retriever; fix style; add unit test for document loader * fix ci workflow config * fix ci workflow config * add set cuda visible device script in ci * fix doc string * fix style; update readme; refactored * add force log info * change build on pr, ignore colossalqa * fix docstring, captitalize all initial letters * fix indexing; fix text-splitter * remove debug code, update reference * reset previous commit * update LICENSE update README add key-value mode, fix bugs * add files back * revert force push * remove junk file * add test files * fix retriever bug, add intent classification * change conversation chain design * rewrite prompt and conversation chain * add ui v1 * ui v1 * fix atavar * add header * Refactor the RAG Code and support Pangu * Refactor the ColossalQA chain to Object-Oriented Programming and the UI demo. * resolved conversation. tested scripts under examples. web demo still buggy * fix ci tests * Some modifications to add ChatGPT api * modify llm.py and remove unnecessary files * Delete applications/ColossalQA/examples/ui/test_frontend_input.json * Remove OpenAI api key * add colossalqa * move files * move files * move files * move files * fix style * Add Readme and fix some bugs. 
* Add something to readme and modify some code * modify a directory name for clarity * remove redundant directory * Correct a type in llm.py * fix AI prefix * fix test_memory.py * fix conversation * fix some erros and typos * Fix a missing import in RAG_ChatBot.py * add colossalcloud LLM wrapper, correct issues in code review --------- Co-authored-by: YeAnbang <anbangy2@outlook.com> Co-authored-by: Orion-Zheng <zheng_zian@u.nus.edu> Co-authored-by: Zian(Andy) Zheng <62330719+Orion-Zheng@users.noreply.github.com> Co-authored-by: Orion-Zheng <zhengzian@u.nus.edu>
54 lines
1.7 KiB
YAML
---
# CI workflow: run the ColossalQA unit test suite on a self-hosted GPU runner.
name: Run colossalqa unit tests

on:
  pull_request:
    types: [synchronize, opened, reopened]
    # Only trigger when ColossalQA sources, tests, or packaging files change.
    paths:
      - 'applications/ColossalQA/colossalqa/**'
      - 'applications/ColossalQA/requirements.txt'
      - 'applications/ColossalQA/setup.py'
      - 'applications/ColossalQA/tests/**'
      - 'applications/ColossalQA/pytest.ini'

jobs:
  tests:
    name: Run colossalqa unit tests
    # Skip draft PRs; run only for PRs targeting main on the upstream repo
    # (forks do not have the self-hosted GPU runners or the mounted test data).
    if: |
      github.event.pull_request.draft == false &&
      github.base_ref == 'main' &&
      github.event.pull_request.base.repo.full_name == 'hpcaitech/ColossalAI'
    runs-on: [self-hosted, gpu]
    container:
      image: hpcaitech/pytorch-cuda:1.12.0-11.3.0
      volumes:
        # Test fixtures and model data pre-staged on the runner host.
        - /data/scratch/test_data_colossalqa:/data/scratch/test_data_colossalqa
        - /data/scratch/llama-tiny:/data/scratch/llama-tiny
      options: --gpus all --rm
    timeout-minutes: 30
    defaults:
      run:
        shell: bash
    steps:
      - name: Checkout ColossalAI
        # NOTE(review): actions/checkout@v2 is deprecated upstream; consider
        # upgrading to @v4 once the runner image supports a current Node runtime.
        uses: actions/checkout@v2

      - name: Install colossalqa
        run: |
          cd applications/ColossalQA
          pip install -e .

      - name: Execute Unit Testing
        run: |
          cd applications/ColossalQA
          pytest tests/
        env:
          # Env vars are always strings in Actions; quote number-looking
          # values so a generic YAML 1.1 parser cannot retype them.
          NCCL_SHM_DISABLE: "1"
          MAX_JOBS: "8"
          ZH_MODEL_PATH: bigscience/bloom-560m
          ZH_MODEL_NAME: bloom
          EN_MODEL_PATH: bigscience/bloom-560m
          EN_MODEL_NAME: bloom
          TEST_DATA_PATH_EN: /data/scratch/test_data_colossalqa/companies.txt
          TEST_DATA_PATH_ZH: /data/scratch/test_data_colossalqa/companies_zh.txt
          TEST_DOCUMENT_LOADER_DATA_PATH: /data/scratch/test_data_colossalqa/tests/*
          SQL_FILE_PATH: /data/scratch/test_data_colossalqa/sql_file_path