Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-12 20:54:35 +00:00
[Test/CI] remove test cases to reduce CI duration (#5753)
* [test] smaller gpt2 test case
* [test] reduce test cases: tests/test_zero/test_gemini/test_zeroddp_state_dict.py
* [test] reduce test cases: tests/test_zero/test_gemini/test_grad_accum.py
* [test] reduce test cases tests/test_zero/test_gemini/test_optim.py
* Revert "[test] smaller gpt2 test case"
Some tests might depend on the size of the model (number of chunks).
This reverts commit df705a5210.
* [test] reduce test cases: tests/test_checkpoint_io/test_gemini_checkpoint_io.py
* [CI] smaller test model for the two modified cases
* [CI] hardcode the GPT model for tests/test_zero/test_gemini/test_search.py since we need a fixed answer there
@@ -18,23 +18,8 @@ def data_gen():
     # tokenized_input = tokenizer(input, return_tensors='pt')
     # input_ids = tokenized_input['input_ids']
     # attention_mask = tokenized_input['attention_mask']
     # input_ids = torch.tensor([[15496, 11, 616, 3290, 318, 13779, 318, 13779]], dtype=torch.int64)
     # attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64)
-    input_ids = torch.tensor(
-        [
-            [15496, 11, 616, 3290, 318, 13779, 318, 13779, 15496, 11, 616, 3290, 318, 13779, 318, 13779],
-            [15496, 11, 616, 3290, 318, 13779, 318, 13779, 15496, 11, 616, 3290, 318, 13779, 318, 13779],
-        ],
-        dtype=torch.int64,
-    )
-    attention_mask = torch.tensor(
-        [
-            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
-            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
-        ],
-        dtype=torch.int64,
-    )
+    input_ids = torch.tensor([[22, 11, 616, 4, 5, 13, 318, 345]], dtype=torch.int64)
+    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64)
     return dict(input_ids=input_ids, attention_mask=attention_mask)
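For orientation, a minimal sketch (not part of this commit) of how a generator like the reduced data_gen above is consumed: the dict is unpacked straight into a tiny GPT-2 model. The GPT2Model class and the config values here are illustrative assumptions that mirror the reduced test config further down in this diff.

import torch
import transformers

# Tiny GPT-2 config mirroring the reduced test config in this diff (illustrative only).
config = transformers.GPT2Config(n_layer=2, n_head=4, n_embd=128, vocab_size=1024, pad_token_id=1022)
model = transformers.GPT2Model(config)

# The reduced data_gen yields one 8-token sequence instead of two 16-token ones.
data = dict(
    input_ids=torch.tensor([[22, 11, 616, 4, 5, 13, 318, 345]], dtype=torch.int64),
    attention_mask=torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64),
)
out = model(**data)
print(out.last_hidden_state.shape)  # torch.Size([1, 8, 128])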
@@ -50,9 +35,9 @@ def data_gen_for_question_answering():
     # question answering data gen
     # `labels` is the type not the token id for token classification, 0 or 1
     data = data_gen()
-    start_positions = torch.tensor([[0], [0]], dtype=torch.int64)
+    start_positions = torch.tensor([0], dtype=torch.int64)
     data["start_positions"] = start_positions
-    end_positions = torch.tensor([[1], [1]], dtype=torch.int64)
+    end_positions = torch.tensor([1], dtype=torch.int64)
     data["end_positions"] = end_positions
     return data
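With a batch of one sequence, start_positions and end_positions shrink to one-element tensors (one answer-span index per batch item). A hedged sketch of how such a batch drives a question-answering head, assuming a transformers version that ships GPT2ForQuestionAnswering and reusing the reduced config values from this diff:

import torch
import transformers

config = transformers.GPT2Config(n_layer=2, n_head=4, n_embd=128, vocab_size=1024, pad_token_id=1022)
model = transformers.GPT2ForQuestionAnswering(config)

out = model(
    input_ids=torch.tensor([[22, 11, 616, 4, 5, 13, 318, 345]], dtype=torch.int64),
    attention_mask=torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64),
    start_positions=torch.tensor([0], dtype=torch.int64),  # answer starts at token 0
    end_positions=torch.tensor([1], dtype=torch.int64),    # answer ends at token 1
)
print(out.loss)  # cross-entropy over start/end logits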
@@ -61,20 +46,14 @@ def data_gen_for_token_classification():
     # token classification data gen
     # `labels` is the type not the token id for token classification, 0 or 1
     data = data_gen()
-    data["labels"] = torch.tensor(
-        [
-            [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
-            [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
-        ],
-        dtype=torch.int64,
-    )
+    data["labels"] = torch.tensor([[0, 0, 0, 0, 0, 0, 0, 1]], dtype=torch.int64)
     return data


 def data_gen_for_sequence_classification():
     # sequence classification data gen
     data = data_gen()
-    data["labels"] = torch.tensor([[1], [1]], dtype=torch.int64)
+    data["labels"] = torch.tensor([1], dtype=torch.int64)
     return data
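The label shapes follow the reduced batch: token classification expects one label per token, shape (1, 8), while sequence classification expects one label per sequence, shape (1,). A rough sketch, assuming the standard transformers GPT-2 heads and the reduced config from this diff:

import torch
import transformers

config = transformers.GPT2Config(
    n_layer=2, n_head=4, n_embd=128, vocab_size=1024,
    pad_token_id=1022, problem_type="single_label_classification",
)
input_ids = torch.tensor([[22, 11, 616, 4, 5, 13, 318, 345]], dtype=torch.int64)
attention_mask = torch.ones_like(input_ids)

# One label per token -> shape (batch, seq_len) = (1, 8).
tok_out = transformers.GPT2ForTokenClassification(config)(
    input_ids=input_ids, attention_mask=attention_mask,
    labels=torch.tensor([[0, 0, 0, 0, 0, 0, 0, 1]], dtype=torch.int64),
)
# One label per sequence -> shape (batch,) = (1,).
seq_out = transformers.GPT2ForSequenceClassification(config)(
    input_ids=input_ids, attention_mask=attention_mask,
    labels=torch.tensor([1], dtype=torch.int64),
)
print(tok_out.loss, seq_out.loss)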
@@ -82,18 +61,12 @@ def date_gen_for_double_heads():
     num_choices = 2
     batch_size = 2
     input_ids = torch.tensor(
-        [
-            [15496, 11, 616, 3290, 318, 13779, 318, 13779, 15496, 11, 616, 3290, 318, 13779, 318, 13779],
-            [15496, 11, 616, 3290, 318, 13779, 318, 13779, 15496, 11, 616, 3290, 318, 13779, 318, 13779],
-        ],
+        [[46, 11, 616, 432, 318, 19, 318, 555], [777, 11, 235, 333, 318, 231, 468, 136]],
         dtype=torch.int64,
     )
-    attention_mask = torch.tensor(
-        [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
-        dtype=torch.int64,
-    )
+    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64)
     mc_labels = torch.zeros(input_ids.shape[0], dtype=torch.int64)

     mc_token_ids = torch.arange(0, num_choices, dtype=torch.int64)
     mc_token_ids = mc_token_ids.expand((batch_size, num_choices))
     multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
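For the double-heads case, the generator still duplicates each (now 8-token) sequence into num_choices copies and uses mc_token_ids to pick which token's hidden state feeds the multiple-choice head. A minimal sketch of that flow with transformers' GPT2DoubleHeadsModel; the config values and the forward call below are illustrative assumptions, only the tensor construction mirrors the lines shown above.

import torch
import transformers

config = transformers.GPT2Config(n_layer=2, n_head=4, n_embd=128, vocab_size=1024, pad_token_id=1022)
model = transformers.GPT2DoubleHeadsModel(config)

num_choices, batch_size = 2, 2
input_ids = torch.tensor(
    [[46, 11, 616, 432, 318, 19, 318, 555], [777, 11, 235, 333, 318, 231, 468, 136]],
    dtype=torch.int64,
)
attention_mask = torch.ones_like(input_ids)

# (batch, seq) -> (batch, num_choices, seq): each sequence is duplicated per choice.
mc_input_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
mc_attention_mask = attention_mask.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
# Per choice, the index of the token whose hidden state feeds the choice classifier.
mc_token_ids = torch.arange(0, num_choices, dtype=torch.int64).expand((batch_size, num_choices))
mc_labels = torch.zeros(batch_size, dtype=torch.int64)  # index of the "correct" choice

out = model(input_ids=mc_input_ids, attention_mask=mc_attention_mask,
            mc_token_ids=mc_token_ids, mc_labels=mc_labels)
print(out.mc_loss)  # multiple-choice loss over the two choices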
@@ -122,14 +95,14 @@ config = transformers.GPT2Config(
     n_layer=2,
     n_head=4,
     n_embd=128,
-    vocab_size=50258,
+    vocab_size=1024,
     attn_pdrop=0,
     embd_pdrop=0,
     resid_pdrop=0,
     summary_first_dropout=0,
     hidden_dropout=0,
     problem_type="single_label_classification",
-    pad_token_id=50256,
+    pad_token_id=1022,
     tie_word_embeddings=True,
 )
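Most of the size win in this config change comes from the token embedding, which has vocab_size x n_embd weights: dropping vocab_size from 50258 to 1024 removes roughly 50258 x 128 ≈ 6.4M parameters and leaves about 1024 x 128 ≈ 0.13M, and pad_token_id presumably moves to 1022 simply to stay below the new vocab_size. A quick sanity-check sketch, not part of the commit, keeping only the size-relevant config fields:

import transformers

def tiny_gpt2(vocab_size, pad_token_id):
    # Build a bare GPT-2 backbone with the small layer/head/width settings from this diff.
    return transformers.GPT2Model(transformers.GPT2Config(
        n_layer=2, n_head=4, n_embd=128,
        vocab_size=vocab_size, pad_token_id=pad_token_id,
    ))

old = tiny_gpt2(50258, 50256)  # before this commit
new = tiny_gpt2(1024, 1022)    # after this commit
print(old.num_parameters(), new.num_parameters())  # the new model is roughly a tenth the size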