Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-08 12:30:42 +00:00
[nfc] fix typo colossalai/ applications/ (#3831)
* fix typo colossalai/autochunk auto_parallel amp
* fix typo colossalai/auto_parallel nn utils etc.
* fix typo colossalai/auto_parallel autochunk fx/passes etc.
* fix typo docs/
* change placememt_policy to placement_policy in docs/ and examples/
* fix typo colossalai/ applications/
@@ -34,7 +34,7 @@ class DetachedReplayBuffer:
         '''
         Workers in the same tp group share this buffer and need same sample for one step.
         Therefore a held_sample should be returned tp_world_size times before it could be dropped.
-        worker_state records wheter a worker got the held_sample
+        worker_state records whether a worker got the held_sample
         '''
         self.tp_world_size = tp_world_size
         self.worker_state = [False] * self.tp_world_size
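The docstring above describes the hold-and-drop protocol: every worker in a tensor-parallel group must receive the same held sample once before it may be dropped, with worker_state tracking who has been served. A minimal sketch of that bookkeeping follows; only tp_world_size, worker_state, and held_sample come from the diff, while the class name and its put/sample methods are hypothetical stand-ins, not the ColossalAI implementation.

    # Minimal sketch, assuming a single held sample per step.
    class HeldSampleBuffer:
        def __init__(self, tp_world_size: int):
            self.tp_world_size = tp_world_size
            self.worker_state = [False] * self.tp_world_size  # which tp ranks already got the sample
            self.held_sample = None

        def put(self, sample):
            self.held_sample = sample
            self.worker_state = [False] * self.tp_world_size

        def sample(self, tp_rank: int):
            item = self.held_sample
            self.worker_state[tp_rank] = True      # mark this worker as served
            if all(self.worker_state):             # every rank has seen the sample...
                self.held_sample = None            # ...so it can be dropped
                self.worker_state = [False] * self.tp_world_size
            return item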
@@ -22,7 +22,7 @@ from .utils import is_rank_0, get_strategy_from_args, set_dist_env
 class ExperienceMakerHolder:
     '''
     Args:
-        detached_trainer_name_list: str list to get ray actor handleskkk
+        detached_trainer_name_list: str list to get ray actor handles
         strategy:
         experience_batch_size: batch size of generated experience
         kl_coef: the coefficient of kl divergence loss
@@ -26,7 +26,7 @@ rpc_is_initialized = _is_current_rpc_agent_set
 class PipelineModel(torch.nn.Module):
     '''
     Actor has 2 kinds of jobs: forward and generate.
-    better to just pipelinize the inner model
+    better to just pipeline the inner model
     '''
     def __init__(self,
                  model: torch.nn.Module,
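The docstring says the Actor has two jobs, forward and generate, and that it is better to pipeline the inner model rather than the whole actor. A rough sketch of that shape is below; WrappedActor and its delegation to model.generate are assumptions for illustration, not the actual PipelineModel.

    import torch

    # Sketch: both jobs go through the same (pipelined) inner module.
    class WrappedActor(torch.nn.Module):
        def __init__(self, model: torch.nn.Module):
            super().__init__()
            self.model = model  # in the real class this would be a pipeline-parallel module

        def forward(self, *args, **kwargs):
            return self.model(*args, **kwargs)

        @torch.no_grad()
        def generate(self, *args, **kwargs):
            # delegate rollout generation to the inner model (assumes it exposes .generate)
            return self.model.generate(*args, **kwargs)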
@@ -119,7 +119,7 @@ class Evaluator(object):
             jdump(all_evaluations,
                   os.path.join(evaluation_results_save_path, f"{model_name_list[0]}_evaluation_results.json"))

-            # Start to calculate scores and save statictics.
+            # Start to calculate scores and save statistics.
             evaluation_statistics_save_path = os.path.join(base_save_path, "evaluation_statistics")
             gpt_evaluate.save_gpt35_evaluation_statistics(model_name_list[0], all_evaluations,
                                                           evaluation_statistics_save_path)
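The hunk above writes the collected evaluations to a JSON file via a jdump() helper before computing statistics. The snippet below is only a guess at what such a helper typically does (serialize an object to JSON, creating the target directory first); it is not the repository's implementation, and jdump_like is a hypothetical name.

    import json
    import os

    # Hedged stand-in for a jdump-style helper.
    def jdump_like(obj, path: str, indent: int = 4) -> None:
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)  # ensure the save directory exists
        with open(path, "w", encoding="utf-8") as f:
            json.dump(obj, f, indent=indent, ensure_ascii=False)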
@@ -111,7 +111,7 @@ def calculate_precision_recall_f1(preds: list, targets: list) -> dict:
     The calculation of precision, recall and f1-score is realized by counting
     the number f overlaps between the preds and target. The comparison length
     limited by the shorter one of preds and targets. This design is mainly
-    considered for classifiction and extraction categories.
+    considered for classification and extraction categories.
     """
     precision_recall_f1 = {"precision": 0, "recall": 0, "f1_score": 0}
     precision_scores = []
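The docstring here explains the shared idea behind the precision, recall and F1 helpers touched later in this diff: count overlaps between a prediction and its target, with the comparison length limited by the shorter of the two. A simplified, hedged sketch of that counting for a single pair follows; exact_overlap is a stand-in name and the positional character comparison is an assumption, not the repository's exact scoring code.

    # Sketch: overlap counting limited by the shorter string, then precision/recall/F1.
    def exact_overlap(pred: str, target: str) -> dict:
        length = min(len(pred), len(target))
        overlap = sum(1 for i in range(length) if pred[i] == target[i])
        precision = overlap / len(pred) if pred else 0
        recall = overlap / len(target) if target else 0
        f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0
        return {"precision": precision, "recall": recall, "f1_score": f1}

    print(exact_overlap("positive", "positive review"))  # full precision, partial recall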
@@ -138,7 +138,7 @@ def calculate_precision_recall_f1(preds: list, targets: list) -> dict:

 def precision(preds: list, targets: list) -> dict:
     """Calculate Precision Metric
-    (design for classifiction and extraction categories)
+    (design for classification and extraction categories)

     Calculating precision by counting the number of overlaps between the preds and target.
     """
@@ -149,7 +149,7 @@ def precision(preds: list, targets: list) -> dict:

 def recall(preds: list, targets: list) -> dict:
     """Calculate Recall Metric
-    (design for classifiction and extraction categories)
+    (design for classification and extraction categories)

     Calculating recall by counting the number of overlaps between the preds and target.
     """
@@ -160,7 +160,7 @@ def recall(preds: list, targets: list) -> dict:

 def F1_score(preds: list, targets: list) -> dict:
     """Calculate F1-score Metric
-    (design for classifiction and extraction categories)
+    (design for classification and extraction categories)

     Calculating f1-score by counting the number of overlaps between the preds and target.
     """