Revert "[dreambooth] fixing the incompatibity in requirements.txt (#3190) (#3378)" (#3481)

This commit is contained in:
NatalieC323
2023-04-06 20:22:52 +08:00
committed by GitHub
parent 891b8e7fac
commit fb8fae6f29
14 changed files with 98 additions and 124 deletions

View File

@@ -20,8 +20,8 @@ from imwatermark import WatermarkEncoder
from scripts.txt2img import put_watermark
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.ddpm import LatentDiffusion
from utils import replace_module, getModelSize
@@ -36,7 +36,7 @@ def load_model_from_config(config, ckpt, verbose=False):
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = LatentDiffusion(**config.model.get("params", dict()))
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print("missing keys:")

View File

@@ -4,7 +4,7 @@ from PIL import Image
from tqdm import tqdm
import numpy as np
import torch
from ldm.models.diffusion.ddpm import LatentDiffusion
from main import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
@@ -57,7 +57,7 @@ if __name__ == "__main__":
print(f"Found {len(masks)} inputs.")
config = OmegaConf.load("models/ldm/inpainting_big/config.yaml")
model = LatentDiffusion(**config.model.get("params", dict()))
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load("models/ldm/inpainting_big/last.ckpt")["state_dict"],
strict=False)

View File

@@ -13,10 +13,9 @@ import scann
import time
from multiprocessing import cpu_count
from ldm.util import parallel_data_prefetch
from ldm.util import instantiate_from_config, parallel_data_prefetch
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.modules.encoders.modules import FrozenClipImageEmbedder, FrozenCLIPTextEmbedder
DATABASES = [
@@ -45,7 +44,7 @@ def load_model_from_config(config, ckpt, verbose=False):
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = LatentDiffusion(**config.model.get("params", dict()))
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print("missing keys:")

View File

@@ -8,6 +8,7 @@ from omegaconf import OmegaConf
from PIL import Image
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config
rescale = lambda x: (x + 1.) / 2.
@@ -217,7 +218,7 @@ def get_parser():
def load_model_from_config(config, sd):
model = LatentDiffusion(**config.get("params", dict()))
model = instantiate_from_config(config)
model.load_state_dict(sd,strict=False)
model.cuda()
model.eval()

View File

@@ -9,7 +9,7 @@ from diffusers import StableDiffusionPipeline
import torch
from ldm.util import instantiate_from_config
from main import get_parser
from ldm.modules.diffusionmodules.openaimodel import UNetModel
if __name__ == "__main__":
with torch.no_grad():
yaml_path = "../../train_colossalai.yaml"
@@ -17,7 +17,7 @@ if __name__ == "__main__":
config = f.read()
base_config = yaml.load(config, Loader=yaml.FullLoader)
unet_config = base_config['model']['params']['unet_config']
diffusion_model = UNetModel(**unet_config.get("params", dict())).to("cuda:0")
diffusion_model = instantiate_from_config(unet_config).to("cuda:0")
pipe = StableDiffusionPipeline.from_pretrained(
"/data/scratch/diffuser/stable-diffusion-v1-4"

View File

@@ -16,9 +16,9 @@ from torch import autocast
from contextlib import nullcontext
from imwatermark import WatermarkEncoder
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.models.diffusion.dpm_solver import DPMSolverSampler
from utils import replace_module, getModelSize
@@ -35,7 +35,7 @@ def load_model_from_config(config, ckpt, verbose=False):
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = LatentDiffusion(**config.model.get("params", dict()))
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print("missing keys:")