#!/usr/bin/env python3
import logging
import torch
import diffusers

# set up a named logger and root logging config at DEBUG level
log = logging.getLogger("test")
log.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s | %(name)s | %(levelname)s | %(module)s | %(message)s')

log.info(f'loaded: torch={torch.__version__} diffusers={diffusers.__version__}')

# generation inputs, model/embedding paths, and pipeline load arguments
prompt_positive = 'futuristic city'
prompt_negative = 'grass'
seeds = [42]
model_path = "runwayml/stable-diffusion-v1-5"
embedding_path_ok = "sd15_text_inv.pt"
# embedding_path_err = "./sd21_text_inv.pt"
device = 'cuda:0'
load_args = {
    "low_cpu_mem_usage": True,
    "torch_dtype": torch.float16,
    "variant": 'fp16',
    "safety_checker": None,
    "load_safety_checker": False,
    # "local_files_only": False,
}
# create the pipeline, then style its progress bar and enable model CPU offload
pipe = diffusers.StableDiffusionPipeline.from_pretrained(model_path, **load_args)
pipe.set_progress_bar_config(bar_format='Progress {rate_fmt}{postfix} {bar} {percentage:3.0f}% {n_fmt}/{total_fmt} {elapsed} {remaining} ' + '\x1b[38;5;71m', ncols=80, colour='#327fba')
pipe.enable_model_cpu_offload()
# arguments for the pipeline call
args = {
    'prompt': [prompt_positive],
    'negative_prompt': [prompt_negative],
    'guidance_scale': 6,
    'generator': [torch.Generator(device).manual_seed(s) for s in seeds],
    'output_type': 'pil',
    'num_inference_steps': 10,
    'eta': 0.0,
    'guidance_rescale': 0.7,
    'height': 512,
    'width': 512,
}

# note: load_args reuses the from_pretrained kwargs; entries like safety_checker
# or variant are not LoRA-loading options and may be ignored or rejected by
# load_lora_weights, hence the try/except below
try:
    pipe.load_lora_weights(embedding_path_ok, **load_args)
except Exception as e:
    log.error(f'failed to load embeddings: {e}')
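
# if sd15_text_inv.pt is a textual-inversion embedding rather than a LoRA (the
# file name suggests it may be), load_textual_inversion is the matching loader;
# left commented out so the behavior above is unchanged:
# pipe.load_textual_inversion(embedding_path_ok)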

output = pipe(**args)
log.info(f'output: {output}') # this works fine
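
# optionally persist the results: with output_type='pil' the pipeline returns a
# StableDiffusionPipelineOutput whose .images is a list of PIL images; the
# output file name pattern below is arbitrary
for i, image in enumerate(output.images):
    image.save(f'output-{i}.png')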