mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-07-27 21:40:19 +00:00)

commit 58cb4fb4f7 (parent 71ef6b32c6)

    add profiling
@@ -134,8 +134,12 @@ class BaseConsumer:
     def calculate_effective_group_to_raw_group_mapping(self, step):
         effective_group_to_raw_group_mapping = {}
         for buffer_idx in range(len(self.buffer)):
-            if self.buffer[buffer_idx][0] is not None and (self.buffer[buffer_idx][-1] <= step - self.n_behind):
-                effective_group_to_raw_group_mapping[len(effective_group_to_raw_group_mapping)] = buffer_idx
+            if self.buffer[buffer_idx][0] is not None:
+                if self.n_behind == 0:
+                    effective_group_to_raw_group_mapping[len(effective_group_to_raw_group_mapping)] = buffer_idx
+                else:
+                    if self.buffer[buffer_idx][-1] <= step - self.n_behind:
+                        effective_group_to_raw_group_mapping[len(effective_group_to_raw_group_mapping)] = buffer_idx
         return effective_group_to_raw_group_mapping
 
     def loop(self) -> None:
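
This hunk splits the old combined condition into an explicit fast path: when n_behind is 0 (no staleness lag configured), every live buffer slot is eligible immediately, and the step - n_behind comparison is only evaluated when a lag is actually requested. A minimal standalone sketch of the new logic, assuming the buffer layout implied by the diff (entry[0] is the rollout group, or None once consumed; entry[-1] is the step that produced it):

def calculate_effective_group_to_raw_group_mapping(buffer, step, n_behind):
    mapping = {}
    for buffer_idx, entry in enumerate(buffer):
        if entry[0] is None:
            continue  # slot already consumed
        # n_behind == 0: take every live group; otherwise require it to lag enough
        if n_behind == 0 or entry[-1] <= step - n_behind:
            mapping[len(mapping)] = buffer_idx
    return mapping

# Toy buffer: three live groups produced at steps 3, 4, 5, plus one consumed slot.
buffer = [["g0", 3], ["g1", 4], [None, 4], ["g2", 5]]
print(calculate_effective_group_to_raw_group_mapping(buffer, step=5, n_behind=0))
# {0: 0, 1: 1, 2: 3} -- every live group is eligible
print(calculate_effective_group_to_raw_group_mapping(buffer, step=5, n_behind=1))
# {0: 0, 1: 1} -- only groups produced at least one step behind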
@@ -246,14 +246,25 @@ if __name__ == "__main__":
                 tensor_parallel_size=args.producer_tensor_parallel_size,
             )
         )
-        generate_config.update(
-            dict(
-                max_tokens=args.max_new_tokens,  # max new tokens
-                ignore_eos=True if args.reward_type == "think_answer_tags" else False,
-                include_stop_str_in_output=True,
-                stop=["</answer>"] if args.reward_type == "think_answer_tags" else None,
-            )
-        )
+        if args.enable_profiling:
+            # If profiling is enabled, we force model to generate to max_new_tokens
+            inference_model_config.update(
+                dict(
+                    max_tokens=args.max_new_tokens,  # max new tokens
+                    ignore_eos=True,
+                    include_stop_str_in_output=True,
+                    stop=None,
+                )
+            )
+        else:
+            generate_config.update(
+                dict(
+                    max_tokens=args.max_new_tokens,  # max new tokens
+                    ignore_eos=True if args.reward_type == "think_answer_tags" else False,
+                    include_stop_str_in_output=True,
+                    stop=["</answer>"] if args.reward_type == "think_answer_tags" else None,
+                )
+            )
         eval_generation_config = {"temperature": 0.6}  # used to update generation config for evaluation
     else:
         raise ValueError(f"Unsupported backend: {args.backend}")
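
Per the in-diff comment, the profiling branch makes every request do a fixed amount of decode work: ignore_eos=True keeps the model generating past any EOS token and stop=None drops the "</answer>" stop string, so each generation runs to the full max_tokens budget and profiles stay comparable across runs. These four kwargs match fields of vLLM's SamplingParams; a minimal sketch of the equivalent standalone configuration (the literal 512 stands in for args.max_new_tokens):

from vllm import SamplingParams

# Force full-length generation so every profiled request is the same size.
profiling_params = SamplingParams(
    max_tokens=512,                   # stand-in for args.max_new_tokens
    ignore_eos=True,                  # do not stop early at the EOS token
    include_stop_str_in_output=True,  # keep any stop string in the returned text
    stop=None,                        # no stop strings can cut decoding short
)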