It is recommended to use np.asarray instead of np.array to avoid unnecessary copies of the data

monster29000 2024-08-17 08:05:10 +08:00
parent 26493b97d3
commit d21ac03a0c
24 changed files with 73 additions and 73 deletions
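Background for the change (a minimal sketch in plain NumPy, independent of the repository code touched below): np.array copies its input by default even when it is already an ndarray, while np.asarray returns the input object unchanged when no dtype conversion is needed; with a Python list or a different requested dtype, both calls still allocate a new array.

import numpy as np

a = np.arange(5)

copied = np.array(a)      # np.array copies by default, even when given an ndarray
aliased = np.asarray(a)   # np.asarray returns the same object if no conversion is needed
print(copied is a)        # False -> a new buffer was allocated
print(aliased is a)       # True  -> no copy was made

# With a Python list, or when a different dtype is requested, both calls
# still create a new array, so such call sites see no performance change.
lst = [1, 2, 3]
print(np.asarray(lst) is lst)                    # False
f32 = a.astype(np.float32)
print(np.asarray(f32, dtype=np.float64) is f32)  # False -> dtype conversion forces a copy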

View File

@@ -60,7 +60,7 @@ class DatasetEvaluator(object):
flag = False
logits = []
for i, sample in enumerate(self.data[category]["data"]):
- if np.any(np.isnan(np.array(list(sample["logits_over_choices"].values())))):
+ if np.any(np.isnan(np.asarray(list(sample["logits_over_choices"].values())))):
if not flag:
print(
f"NaN in the logits, switch to exact match for category {category} in dataset {self.dataset_name} in model {self.model_name}."
@@ -81,10 +81,10 @@ class DatasetEvaluator(object):
)
logits.append(references[i] if score == 1 else -1)
else:
- logits.append(np.argmax(np.array(list(sample["logits_over_choices"].values()))))
+ logits.append(np.argmax(np.asarray(list(sample["logits_over_choices"].values()))))
- references = np.array(references)
+ references = np.asarray(references)
- logits = np.array(logits)
+ logits = np.asarray(logits)
scores = np.sum(references == logits) / len(self.data[category]["data"]) * 100
self.evaluation_results[metric][category] = (scores, len(self.data[category]["data"]))
@@ -107,7 +107,7 @@ class DatasetEvaluator(object):
flag = False
logits = []
for i, sample in enumerate(self.data[category]["data"]):
- if np.any(np.isnan(np.array(list(sample["logits_over_choices"].values())))):
+ if np.any(np.isnan(np.asarray(list(sample["logits_over_choices"].values())))):
if not flag:
print(
f"NaN in the logits, switch to exact match for category {category} in dataset {self.dataset_name} in model {self.model_name}."
@@ -123,7 +123,7 @@ class DatasetEvaluator(object):
)
logits.append(references[i] if score == 1 else -1)
else:
- logits.append(np.argmax(np.array(list(sample["logits_over_choices"].values()))))
+ logits.append(np.argmax(np.asarray(list(sample["logits_over_choices"].values()))))
metric_method = eval("metric_helper." + metric)
@@ -194,21 +194,21 @@ class DatasetEvaluator(object):
if metric == "perplexity":
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
losses = [min(sample["loss"]) for sample in self.data[category]["data"]]
- perplexity = np.mean(np.exp(np.array(losses)))
+ perplexity = np.mean(np.exp(np.asarray(losses)))
self.evaluation_results["perplexity"][category] = (perplexity, len(self.data[category]["data"]))
self.evaluation_results["perplexity"]["ALL"] += perplexity * weight
elif metric == "ppl_score":
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
losses = [min(sample["loss"]) for sample in self.data[category]["data"]]
- perplexity_score = np.mean(np.exp(-np.array(losses))) * 100
+ perplexity_score = np.mean(np.exp(-np.asarray(losses))) * 100
self.evaluation_results["ppl_score"][category] = (perplexity_score, len(self.data[category]["data"]))
self.evaluation_results["ppl_score"]["ALL"] += perplexity_score * weight
elif metric == "ppl_score_over_choices" and self.data[category]["inference_kwargs"]["all_classes"] is not None:
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
loss_over_choices = [sample["loss_over_choices"] for sample in self.data[category]["data"]]
- perplexity_score_over_choices = np.mean(np.exp(-np.array(loss_over_choices))) * 100
+ perplexity_score_over_choices = np.mean(np.exp(-np.asarray(loss_over_choices))) * 100
self.evaluation_results["ppl_score_over_choices"][category] = (
perplexity_score_over_choices,
@@ -218,14 +218,14 @@ class DatasetEvaluator(object):
elif metric == "per_byte_perplexity":
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
losses = [min(sample["loss_sum"]) for sample in self.data[category]["data"]]
- perplexity = np.mean(np.exp(np.array(losses) / np.array(self.N_bytes[category])))
+ perplexity = np.mean(np.exp(np.asarray(losses) / np.asarray(self.N_bytes[category])))
self.evaluation_results["per_byte_perplexity"][category] = perplexity
self.evaluation_results["per_byte_perplexity"]["ALL"] += perplexity * weight
elif metric == "per_byte_ppl_score":
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
losses = [min(sample["loss_sum"]) for sample in self.data[category]["data"]]
- perplexity_score = np.mean(np.exp(-np.array(losses) / np.array(self.N_bytes[category]))) * 100
+ perplexity_score = np.mean(np.exp(-np.asarray(losses) / np.asarray(self.N_bytes[category]))) * 100
self.evaluation_results["per_byte_ppl_score"][category] = perplexity_score
self.evaluation_results["per_byte_ppl_score"]["ALL"] += perplexity_score * weight
@@ -233,14 +233,14 @@ class DatasetEvaluator(object):
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
losses = [min(sample["loss_sum"]) for sample in self.data[category]["data"]]
token_nums = [sample["token_num"][np.argmin(sample["loss_sum"])] for sample in self.data[category]["data"]]
- perplexity = np.sum(np.array(losses)) / np.sum(np.array(token_nums))
+ perplexity = np.sum(np.asarray(losses)) / np.sum(np.asarray(token_nums))
self.evaluation_results["loss_over_all_tokens"][category] = perplexity
self.evaluation_results["loss_over_all_tokens"]["ALL"] += perplexity * weight
# The number of tokens can be used for normalizing.
# See https://github.com/SkyworkAI/Skywork/issues/43#issuecomment-1811733834
- print(f"{self.model_name} {category} token num: {np.sum(np.array(token_nums))}")
+ print(f"{self.model_name} {category} token num: {np.sum(np.asarray(token_nums))}")
def _evaluate(self):
"""Calculate and return evaluation results"""

View File

@@ -422,7 +422,7 @@ class HuggingFaceModel(BaseModel):
batch[j]["loss_over_choices"] = loss_over_choices[j]
if calculate_loss:
- batch[j]["loss"] = (np.array(batch_losses[j]) / np.array(batch_target_token_nums[j])).tolist()
+ batch[j]["loss"] = (np.asarray(batch_losses[j]) / np.asarray(batch_target_token_nums[j])).tolist()
# loss_sum is specially used for pertrain dataset for calculating per-byte-perplexity.
# However, loss (which is per sample loss) suffices for most cases.

View File

@@ -109,7 +109,7 @@ class Solver:
strategies_len = []
for node in self.nodes:
strategies_len.append(self.cost_graph.node_lens[node])
- strategies_len = np.array(strategies_len)
+ strategies_len = np.asarray(strategies_len)
# prepare following_nodes
following_nodes = self.cost_graph.following_dict
@@ -137,8 +137,8 @@ class Solver:
for i in range(strategies_len[src_node_index]):
for j in range(strategies_len[dst_node_index]):
resharding_costs.append(edge_cost[(i, j)])
- edge_pairs = np.array(edge_pairs)
+ edge_pairs = np.asarray(edge_pairs)
- resharding_costs = np.array(resharding_costs)
+ resharding_costs = np.asarray(resharding_costs)
# prepare liveness_set
liveness_set = self.liveness_list
@@ -184,9 +184,9 @@ class Solver:
communication_costs.append(origin_communication_cost)
memory_costs.append(memory_cost)
- compute_costs = np.array(compute_costs)
+ compute_costs = np.asarray(compute_costs)
- communication_costs = np.array(communication_costs)
+ communication_costs = np.asarray(communication_costs)
- memory_costs = np.array(memory_costs)
+ memory_costs = np.asarray(memory_costs)
# omit initial value for nodes
s_init_np = None

View File

@@ -49,7 +49,7 @@ def alpa_dp_impl(
for k in range(num_layers - 1, -1, -1):
for d in range(1, num_devices + 1):
for m, submesh in enumerate(submesh_choices):
- n_submesh_devices = np.prod(np.array(submesh))
+ n_submesh_devices = np.prod(np.asarray(submesh))
if n_submesh_devices <= d:
# TODO: [luzgh]: Why alpa needs max_n_succ_stages? Delete.
# if s - 1 <= max_n_succ_stages[i, k - 1, m, n_config]:
@@ -83,7 +83,7 @@ def alpa_dp_impl(
res.append(((current_layer, next_start_layer), submesh_choice, autosharding_choice))
current_s -= 1
current_layer = next_start_layer
- current_devices -= np.prod(np.array(submesh_choices[submesh_choice]))
+ current_devices -= np.prod(np.asarray(submesh_choices[submesh_choice]))
assert current_s == 0 and current_layer == num_layers and current_devices == 0
return total_cost, res
@@ -98,7 +98,7 @@ def alpa_dp(
Arguments:
submesh_choices: List[(int,int)]
num_autosharding_configs: Max number of t_intra(start_layer, end_layer, LogicalMesh)
- compute_cost: np.array(num_layers,num_layers,num_submesh_choices,num_autosharding_configs)
+ compute_cost: np.asarray(num_layers,num_layers,num_submesh_choices,num_autosharding_configs)
"""
assert np.shape(compute_cost) == (
num_layers,

View File

@@ -34,9 +34,9 @@ class ReqQueue:
self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1))  # hard to analysis
self.cache_len_list.sort(key=lambda x: -x[1])
- left_out_len_array = np.array([e[1] for e in self.cache_len_list])
+ left_out_len_array = np.asarray([e[1] for e in self.cache_len_list])
# assert left_out_len_array.min() >= 0
- has_run_len_array = np.array([e[0] for e in self.cache_len_list])
+ has_run_len_array = np.asarray([e[0] for e in self.cache_len_list])
cum_run_len_array = np.cumsum(has_run_len_array)
size_array = np.arange(1, len(self.cache_len_list) + 1, 1)

View File

@@ -27,7 +27,7 @@ def _filter_exlarge_params(model: nn.Module, size_dict: Dict[int, List[int]]) ->
if len(agg_size_list) == 0:
return
- params_size_arr = np.array(agg_size_list)
+ params_size_arr = np.asarray(agg_size_list)
std = np.std(params_size_arr)
mean = np.mean(params_size_arr)

View File

@@ -118,10 +118,10 @@ class ImageNetBase(Dataset):
self.human_labels = [human_dict[s] for s in self.synsets]
labels = {
- "relpath": np.array(self.relpaths),
+ "relpath": np.asarray(self.relpaths),
- "synsets": np.array(self.synsets),
+ "synsets": np.asarray(self.synsets),
- "class_label": np.array(self.class_labels),
+ "class_label": np.asarray(self.class_labels),
- "human_label": np.array(self.human_labels),
+ "human_label": np.asarray(self.human_labels),
}
if self.process_images:
@@ -346,7 +346,7 @@ class ImageNetSR(Dataset):
if not image.mode == "RGB":
image = image.convert("RGB")
- image = np.array(image).astype(np.uint8)
+ image = np.asarray(image).astype(np.uint8)
min_side_len = min(image.shape[:2])
crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None)
@@ -364,7 +364,7 @@ class ImageNetSR(Dataset):
if self.pil_interpolation:
image_pil = PIL.Image.fromarray(image)
LR_image = self.degradation_process(image_pil)
- LR_image = np.array(LR_image).astype(np.uint8)
+ LR_image = np.asarray(LR_image).astype(np.uint8)
else:
LR_image = self.degradation_process(image=image)["image"]

View File

@@ -55,7 +55,7 @@ class LSUNBase(Dataset):
# default to score-sde preprocessing
- img = np.array(image).astype(np.uint8)  # convert image to numpy array
+ img = np.asarray(image).astype(np.uint8)  # convert image to numpy array
crop = min(img.shape[0], img.shape[1])  # crop the image to a square shape
(
h,
@@ -73,7 +73,7 @@ class LSUNBase(Dataset):
image = image.resize((self.size, self.size), resample=self.interpolation)
image = self.flip(image)  # flip the image horizontally with the given probability
- image = np.array(image).astype(np.uint8)
+ image = np.asarray(image).astype(np.uint8)
example["image"] = (image / 127.5 - 1.0).astype(np.float32)  # normalize the image values and convert to float32
return example  # return the example dictionary containing the image and its file paths

View File

@@ -86,7 +86,7 @@ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return np.array(betas)
+ return np.asarray(betas)
def extract_into_tensor(a, t, x_shape):

View File

@@ -73,9 +73,9 @@ def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
k : kernel
"""
- v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1.0, 0.0]))
+ v = np.dot(np.asarray([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.asarray([1.0, 0.0]))
- V = np.array([[v[0], v[1]], [v[1], -v[0]]])
+ V = np.asarray([[v[0], v[1]], [v[1], -v[0]]])
- D = np.array([[l1, 0], [0, l2]])
+ D = np.asarray([[l1, 0], [0, l2]])
Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
@@ -141,7 +141,7 @@ def blur(x, k):
return x
- def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10.0, noise_level=0):
+ def gen_kernel(k_size=np.asarray([15, 15]), scale_factor=np.asarray([4, 4]), min_var=0.6, max_var=10.0, noise_level=0):
""" "
# modified version of https://github.com/assafshocher/BlindSR_dataset_generator
# Kai Zhang
@@ -156,7 +156,7 @@ def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var
# Set COV matrix using Lambdas and Theta
LAMBDA = np.diag([lambda_1, lambda_2])
- Q = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
+ Q = np.asarray([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
SIGMA = Q @ LAMBDA @ Q.T
INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
@@ -201,7 +201,7 @@ def fspecial_laplacian(alpha):
h1 = alpha / (alpha + 1)
h2 = (1 - alpha) / (alpha + 1)
h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
- h = np.array(h)
+ h = np.asarray(h)
return h

View File

@@ -73,9 +73,9 @@ def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
k : kernel
"""
- v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1.0, 0.0]))
+ v = np.dot(np.asarray([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.asarray([1.0, 0.0]))
- V = np.array([[v[0], v[1]], [v[1], -v[0]]])
+ V = np.asarray([[v[0], v[1]], [v[1], -v[0]]])
- D = np.array([[l1, 0], [0, l2]])
+ D = np.asarray([[l1, 0], [0, l2]])
Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
@@ -141,7 +141,7 @@ def blur(x, k):
return x
- def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10.0, noise_level=0):
+ def gen_kernel(k_size=np.asarray([15, 15]), scale_factor=np.asarray([4, 4]), min_var=0.6, max_var=10.0, noise_level=0):
""" "
# modified version of https://github.com/assafshocher/BlindSR_dataset_generator
# Kai Zhang
@@ -156,7 +156,7 @@ def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var
# Set COV matrix using Lambdas and Theta
LAMBDA = np.diag([lambda_1, lambda_2])
- Q = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
+ Q = np.asarray([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
SIGMA = Q @ LAMBDA @ Q.T
INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
@@ -201,7 +201,7 @@ def fspecial_laplacian(alpha):
h1 = alpha / (alpha + 1)
h2 = (1 - alpha) / (alpha + 1)
h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
- h = np.array(h)
+ h = np.asarray(h)
return h

View File

@@ -658,7 +658,7 @@ def calculate_ssim(img1, img2, border=0):
ssims = []
for i in range(3):
ssims.append(ssim(img1[:, :, i], img2[:, :, i]))
- return np.array(ssims).mean()
+ return np.asarray(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:

View File

@@ -24,7 +24,7 @@ def log_txt_as_img(wh, xc, size=10):
except UnicodeEncodeError:
print("Cant encode string for logging. Skipping.")
- txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
+ txt = np.asarray(txt).transpose(2, 0, 1) / 127.5 - 1.0
txts.append(txt)
txts = np.stack(txts)
txts = torch.tensor(txts)

View File

@@ -57,7 +57,7 @@ def load_img(path):
print(f"loaded input image of size ({w}, {h}) from {path}")
w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
image = image.resize((w, h), resample=PIL.Image.LANCZOS)
- image = np.array(image).astype(np.float32) / 255.0
+ image = np.asarray(image).astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
return 2.0 * image - 1.0

View File

@@ -12,12 +12,12 @@ from tqdm import tqdm
def make_batch(image, mask, device):
- image = np.array(Image.open(image).convert("RGB"))
+ image = np.asarray(Image.open(image).convert("RGB"))
image = image.astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
- mask = np.array(Image.open(mask).convert("L"))
+ mask = np.asarray(Image.open(mask).convert("L"))
mask = mask.astype(np.float32) / 255.0
mask = mask[None, None]
mask[mask < 0.5] = 0

View File

@@ -183,7 +183,7 @@ def parse_args():
def put_watermark(img, wm_encoder=None):
if wm_encoder is not None:
- img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
+ img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
img = wm_encoder.encode(img, "dwtDct")
img = Image.fromarray(img[:, :, ::-1])
return img

View File

@@ -33,11 +33,11 @@ logger = get_logger(__name__)
def prepare_mask_and_masked_image(image, mask):
- image = np.array(image.convert("RGB"))
+ image = np.asarray(image.convert("RGB"))
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
- mask = np.array(mask.convert("L"))
+ mask = np.asarray(mask.convert("L"))
mask = mask.astype(np.float32) / 255.0
mask = mask[None, None]
mask[mask < 0.5] = 0

View File

@@ -63,7 +63,7 @@ def bench_rotor(
sample_points: int = 20,
free_memory: int = torch.cuda.mem_get_info()[0],
start_factor: int = 4,
- ) -> Tuple[np.array, list, list]:
+ ) -> Tuple[np.asarray, list, list]:
"""Auto Checkpoint Rotor Algorithm benchmarking
Benchmarks the Auto Checkpoint Rotor Algorithm for a given graph module and data.
Args:
@@ -76,7 +76,7 @@ def bench_rotor(
start_factor (int, optional): Start memory budget factor for benchmark, the start memory budget
will be free_memory / start_factor. Defaults to 4.
Returns:
- Tuple[np.array, list, list]: return budgets vector (MB), peak memory vector (MB), step time vector (MS).
+ Tuple[np.asarray, list, list]: return budgets vector (MB), peak memory vector (MB), step time vector (MS).
"""
peak_hist, step_hist = [], []
raw_graph = deepcopy(gm.graph)

View File

@@ -32,7 +32,7 @@ class BlendableDataset(torch.utils.data.Dataset):
self.size += len(dataset)
# Normalize weights.
- weights = np.array(weights, dtype=np.float64)
+ weights = np.asarray(weights, dtype=np.float64)
sum_weights = np.sum(weights)
assert sum_weights > 0.0
weights /= sum_weights

View File

@@ -371,11 +371,11 @@ def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, masked_labels
# Tokens and token types.
filler = [pad_id] * padding_length
- tokens_np = np.array(tokens + filler, dtype=np.int64)
+ tokens_np = np.asarray(tokens + filler, dtype=np.int64)
- tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
+ tokentypes_np = np.asarray(tokentypes + filler, dtype=np.int64)
# Padding mask.
- padding_mask_np = np.array([1] * num_tokens + [0] * padding_length, dtype=np.int64)
+ padding_mask_np = np.asarray([1] * num_tokens + [0] * padding_length, dtype=np.int64)
# Lables and loss mask.
labels = [-1] * max_seq_length
@@ -384,8 +384,8 @@ def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, masked_labels
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
loss_mask[masked_positions[i]] = 1
- labels_np = np.array(labels, dtype=np.int64)
+ labels_np = np.asarray(labels, dtype=np.int64)
- loss_mask_np = np.array(loss_mask, dtype=np.int64)
+ loss_mask_np = np.asarray(loss_mask, dtype=np.int64)
return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np

View File

@@ -174,4 +174,4 @@ class ICTDataset(Dataset):
pad_mask = [1] * len(tokens) + [0] * num_pad
tokens += [self.pad_id] * num_pad
- return np.array(tokens), np.array(pad_mask)
+ return np.asarray(tokens), np.asarray(pad_mask)

View File

@@ -84,7 +84,7 @@ def read_longs(f, n):
def write_longs(f, a):
- f.write(np.array(a, dtype=np.int64))
+ f.write(np.asarray(a, dtype=np.int64))
dtypes = {1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64, 6: float, 7: np.double, 8: np.uint16}
@@ -260,7 +260,7 @@ class IndexedDatasetBuilder(object):
self.doc_idx = [0]
def add_item(self, tensor):
- bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
+ bytes = self.out_file.write(np.asarray(tensor.numpy(), dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
@@ -344,15 +344,15 @@ class MMapIndexedDataset(torch.utils.data.Dataset):
self._file.write(struct.pack("<Q", len(sizes)))
self._file.write(struct.pack("<Q", len(doc_idx)))
- sizes = np.array(sizes, dtype=np.int32)
+ sizes = np.asarray(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order="C"))
del sizes
- pointers = np.array(pointers, dtype=np.int64)
+ pointers = np.asarray(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order="C"))
del pointers
- doc_idx = np.array(doc_idx, dtype=np.int64)
+ doc_idx = np.asarray(doc_idx, dtype=np.int64)
self._file.write(doc_idx.tobytes(order="C"))
def __exit__(self, exc_type, exc_val, exc_tb):
@@ -517,7 +517,7 @@ class MMapIndexedDatasetBuilder(object):
self._doc_idx = [0]
def add_item(self, tensor):
- np_array = np.array(tensor.numpy(), dtype=self._dtype)
+ np_array = np.asarray(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order="C"))
self._sizes.append(np_array.size)

View File

@@ -40,7 +40,7 @@ def synthesize_1d_sparse_feature(
indices = torch.randint(low=0, high=num_embed, size=(indices_in_batch,), device=device, dtype=torch.long)
offsets = (
torch.from_numpy(
- np.array(
+ np.asarray(
[
0,
*np.sort(np.random.randint(low=0, high=indices_in_batch, size=(indices_in_batch - 1,))),
@@ -267,7 +267,7 @@ def run_parallel_freq_aware_embed_tablewise(rank, world_size):
input0 [1,2,3] [6,7] []
input1 [] [9] [13,15]
input2 [1,5] [6,8] [11]
↑ ↑ ↑
rank 0 rank 0 rank 1
in KJT format
"""

View File

@@ -36,7 +36,7 @@ def test_runtime_mem_tracer():
torch.allclose(p1.to(torch.half), p2)
non_model_data_list = runtime_mem_tracer._memstats.non_model_data_list("cuda")
- cuda_non_model_data_list = np.array(non_model_data_list) / 1024**2
+ cuda_non_model_data_list = np.asarray(non_model_data_list) / 1024**2
print("cuda_non_model_data_list", len(cuda_non_model_data_list))
print(non_model_data_list)