add some todo

genghaozhe 2024-05-17 10:55:28 +00:00
parent 013690a86b
commit 3d625ca836
4 changed files with 15 additions and 7 deletions


@@ -83,7 +83,7 @@ class ChunkManager:
if chunk_group:
# the chunk group is not empty
# close the last chunk
self.__close_one_chunk(chunk_group[-1])
self.__close_one_chunk(chunk_group[-1]) # chunk_group[-1] is full: close it so nothing more can be appended, then scatter it across the ZeRO process group
if tensor.numel() > chunk_size:
chunk_size = tensor.numel()
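
The comment added above explains why the last chunk gets closed. As an illustration only (toy names, not the real ChunkManager API), the append-or-close decision it describes looks roughly like this:

from dataclasses import dataclass
from typing import List

@dataclass
class ToyChunk:
    capacity: int          # number of elements the chunk can hold
    used: int = 0          # elements already appended
    closed: bool = False   # a closed chunk accepts no more tensors

    def can_fit(self, numel: int) -> bool:
        return not self.closed and self.used + numel <= self.capacity

def append_tensor(chunk_group: List[ToyChunk], numel: int, chunk_size: int) -> None:
    if chunk_group and not chunk_group[-1].can_fit(numel):
        # the last chunk is full: close it so it never grows again and can be
        # scattered across the ZeRO process group
        chunk_group[-1].closed = True
    if not chunk_group or chunk_group[-1].closed:
        # an oversized tensor gets a chunk sized to hold it
        chunk_group.append(ToyChunk(capacity=max(chunk_size, numel)))
    chunk_group[-1].used += numel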


@@ -33,19 +33,22 @@ class GeminiZeROHook(ColoParamOpHook):
all_chunks = self._chunk_manager.get_chunks(params)
# wait for prefetched chunks, filter those are not prefetched
chunks_fetch_sync = self._gemini_manager.wait_chunks(all_chunks)
chunks_fetch_sync = self._gemini_manager.wait_chunks(all_chunks) # the chunks that still have to be fetched now (were not prefetched)
# transfer state
for p in params:
# TODO(haze188): check the tensor state transition
self._chunk_manager.trans_tensor_state(p, TensorState.COMPUTE)
self._gemini_manager.sample_overall_data()
# evit chunks, aware of async fetched
# TODO(haze188): chunks we prefetched may get evicted again here, check this
self._gemini_manager.adjust_layout(
all_chunks, record_anyway=self._gemini_manager.placement_policy.max_prefetch > 0
)
# fetch the rest synchronously
# TODO(haze188): 1. prefetch first or fetch first? prefetch is async, fetch is sync
for chunk in chunks_fetch_sync:
self._chunk_manager.access_chunk(chunk)
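
The TODOs in this hunk ask whether prefetch should come before the synchronous fetch; the order already implemented is: wait on async prefetches, adjust the layout, then fetch the rest synchronously. A minimal sketch of that ordering, with stand-in names (FakeWork, ToyHook) that are not the real API:

from typing import Dict, List

class FakeWork:
    """Stand-in for a torch.distributed Work handle; wait() is a no-op here."""
    def wait(self) -> None:
        pass

class ToyHook:
    """Illustrative pre-op flow only."""
    def __init__(self) -> None:
        self.async_works: Dict[str, FakeWork] = {}  # chunk -> in-flight prefetch

    def pre_op(self, needed_chunks: List[str]) -> None:
        # 1. split the needed chunks: already prefetched (async) vs. fetch-now (sync)
        fetch_sync: List[str] = []
        for chunk in needed_chunks:
            work = self.async_works.pop(chunk, None)
            if work is not None:
                work.wait()            # prefetch was issued earlier, just wait for it
            else:
                fetch_sync.append(chunk)
        # 2. adjust the layout: make room, possibly evicting chunks (even prefetched ones)
        self.adjust_layout(needed_chunks)
        # 3. fetch whatever was not prefetched, blocking until each chunk arrives
        for chunk in fetch_sync:
            self.access_chunk(chunk)

    def adjust_layout(self, chunks: List[str]) -> None:
        pass  # placeholder for the placement policy's evict/allocate step

    def access_chunk(self, chunk: str) -> None:
        pass  # placeholder for the synchronous (blocking) chunk transfer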


@@ -125,7 +125,7 @@ class GeminiManager:
self._async_works[chunk].wait()
del self._async_works[chunk]
else:
non_prefetched_chunks.append(chunk)
non_prefetched_chunks.append(chunk) # not prefetched earlier, so it has to be fetched now
return tuple(non_prefetched_chunks)
def add_work(self, chunk: Chunk, work: dist.Work):
@@ -154,6 +154,7 @@ class GeminiManager:
def _record_warmup_chunks_order(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None:
self._compute_idx += 1
# TODO(haze188): _compute_list records the access order of the chunks
if self._warmup and (self._placement_policy.need_mem_stats or record_anyway):
self._compute_list.append(chunks)
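
The new TODO notes that _compute_list records the order in which chunk groups are accessed during warmup; later iterations replay this order to decide what to prefetch. A rough sketch of that bookkeeping with made-up names, not the real GeminiManager fields:

from typing import List, Tuple

class ToyWarmupRecorder:
    """Illustrative only: record the chunk-access order during the warmup pass."""
    def __init__(self, need_mem_stats: bool) -> None:
        self.warmup = True
        self.need_mem_stats = need_mem_stats
        self.compute_idx = -1                          # index of the current compute step
        self.compute_list: List[Tuple[str, ...]] = []  # chunks touched at each step

    def record(self, chunks: Tuple[str, ...], record_anyway: bool = False) -> None:
        self.compute_idx += 1
        # record only while warming up, and only if memory stats or prefetching need it
        if self.warmup and (self.need_mem_stats or record_anyway):
            self.compute_list.append(chunks)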


@@ -45,9 +45,9 @@ class PlacementPolicy(ABC):
raise NotImplementedError
import os
rank = int(os.environ["RANK"])
# import torch.distributed as dist
# # rank = int(os.environ["RANK"])
# rank = dist.get_rank()
class StaticPlacementPolicy(PlacementPolicy):
@@ -118,8 +118,10 @@ class StaticPlacementPolicy(PlacementPolicy):
def get_prefetch_chunks(self) -> List[Chunk]:
if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list
return []
# at most how many async works can be in flight
can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)
prefetch = []
# for the static policy, running out of memory simply fails; a dynamic policy may need to analyze the current runtime memory usage first, then allocate space or evict chunks
for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):
for chunk in self.gemini_manager.compute_list[i]:
if len(prefetch) >= can_prefetch:
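
The hunk above selects prefetch candidates by looking ahead in the recorded access order, bounded by the remaining async budget (max_prefetch minus the works already in flight). A hedged sketch of that selection; is_resident is a hypothetical predicate standing in for whatever checks the real policy uses to skip chunks that are already usable:

from typing import Callable, List, Sequence, Tuple

def pick_prefetch_chunks(
    compute_list: Sequence[Tuple[str, ...]],  # chunk access order recorded during warmup
    compute_idx: int,                         # step currently being executed
    max_prefetch: int,                        # upper bound on in-flight async prefetches
    num_async_works: int,                     # prefetches already in flight
    is_resident: Callable[[str], bool],       # hypothetical: chunk already usable on device?
) -> List[str]:
    budget = max_prefetch - num_async_works   # how many more async works may be issued
    prefetch: List[str] = []
    for step in range(compute_idx + 1, len(compute_list)):
        for chunk in compute_list[step]:
            if len(prefetch) >= budget:
                return prefetch
            if not is_resident(chunk) and chunk not in prefetch:
                prefetch.append(chunk)
    return prefetch
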
@@ -238,7 +240,9 @@ class AutoPlacementPolicy(PlacementPolicy):
grads_device_map[p] = torch.device("cpu")
def get_prefetch_chunks(self, max_prefetch: int) -> List[Chunk]:
return [] # TODO @botbw: implement prefetching for auto
# TODO @haze188 @botbw: implement prefetching for auto
return []
class PlacementPolicyFactory: