diff --git a/nodes.py b/nodes.py
index eba5bd5..8250daa 100644
--- a/nodes.py
+++ b/nodes.py
@@ -129,11 +129,11 @@ def INPUT_TYPES(s):
                 "model": ("ULTRAPIXELMODEL",),
                 "height": (
                     "INT",
-                    {"default": 2048, "min": 512, "max": 5120, "step": 256},
+                    {"default": 2048, "min": 512, "max": 5120, "step": 8},
                 ),
                 "width": (
                     "INT",
-                    {"default": 2048, "min": 512, "max": 5120, "step": 256},
+                    {"default": 2048, "min": 512, "max": 5120, "step": 8},
                 ),
                 "seed": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF}),
                 "dtype": (["bf16", "fp32"],),
diff --git a/ultrapixel.py b/ultrapixel.py
index ab99c54..2fab483 100644
--- a/ultrapixel.py
+++ b/ultrapixel.py
@@ -16,6 +16,7 @@
 )
 from .train import WurstCore_t2i as WurstCoreC
+from safetensors.torch import load_file as load_safetensors
 
 
 class UltraPixel:
     def __init__(
@@ -88,6 +89,9 @@ def set_config(
         self.prompt = prompt
         self.controlnet_image = controlnet_image
 
+
+
+
     def process(self):
         device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
         torch.manual_seed(self.seed)
@@ -140,11 +144,11 @@ def process(self):
         captions = [self.prompt]
         height, width = self.height, self.width
-
-        sdd = torch.load(self.pretrained, map_location="cpu")
-        collect_sd = {}
-        for k, v in sdd.items():
-            collect_sd[k[7:]] = v
+
+        # Load the safetensors checkpoint (ultrapixel_t2i / lora_cat), stripping
+        # the "module." prefix from keys when present; loading into the model
+        # happens only in the if/else below, once, on the appropriate path.
+        sdd = load_safetensors(self.pretrained)
+        collect_sd = {k[7:] if k.startswith("module.") else k: v for k, v in sdd.items()}
 
         if self.controlnet_image == None:
             models.train_norm.load_state_dict(collect_sd)
         else:
@@ -154,8 +158,8 @@ def process(self):
                 load_or_fail(self.controlnet), strict=True
             )
 
-        models.generator.eval()
-        models.train_norm.eval()
+        models.generator.eval()  # stage C
+        models.train_norm.eval()  # stage UP
 
         batch_size = 1
         edge_image = None