NightRaven109 committed
Commit eeb4ef5 · verified · 1 Parent(s): 73b8fa0

Update app.py

Files changed (1)
  1. app.py +17 -25
app.py CHANGED
@@ -5,7 +5,7 @@ import spaces
 import numpy as np
 from PIL import Image
 import safetensors.torch
-from huggingface_hub import hf_hub_download
+from huggingface_hub import snapshot_download
 from accelerate import Accelerator
 from accelerate.utils import set_seed
 from diffusers import (
@@ -22,10 +22,11 @@ from myutils.wavelet_color_fix import wavelet_color_fix, adain_color_fix
 pipeline = None
 generator = None
 accelerator = None
+model_path = None
 
 @spaces.GPU
 def initialize_models():
-    global pipeline, generator, accelerator
+    global pipeline, generator, accelerator, model_path
 
     # Initialize accelerator
     accelerator = Accelerator(
@@ -34,50 +35,41 @@ def initialize_models():
     )
 
     try:
-        # Download and load models with authentication token
+        # Download the entire repository
+        model_path = snapshot_download(
+            repo_id="NightRaven109/CCSRModels",
+            token=os.environ['Read2']
+        )
+
+        # Load models from local directory
         scheduler = DDPMScheduler.from_pretrained(
-            "NightRaven109/CCSRModels",
-            subfolder="stable-diffusion-2-1-base/scheduler",
-            use_auth_token=os.environ['Read2']
+            os.path.join(model_path, "stable-diffusion-2-1-base/scheduler")
         )
 
         text_encoder = CLIPTextModel.from_pretrained(
-            "NightRaven109/CCSRModels",
-            subfolder="stable-diffusion-2-1-base/text_encoder",
-            use_auth_token=os.environ['Read2']
+            os.path.join(model_path, "stable-diffusion-2-1-base/text_encoder")
        )
 
         tokenizer = CLIPTokenizer.from_pretrained(
-            "NightRaven109/CCSRModels",
-            subfolder="stable-diffusion-2-1-base/tokenizer",
-            use_auth_token=os.environ['Read2']
+            os.path.join(model_path, "stable-diffusion-2-1-base/tokenizer")
         )
 
         feature_extractor = CLIPImageProcessor.from_pretrained(
-            "NightRaven109/CCSRModels",
-            subfolder="stable-diffusion-2-1-base/feature_extractor",
-            use_auth_token=os.environ['Read2']
+            os.path.join(model_path, "stable-diffusion-2-1-base/feature_extractor")
         )
 
         unet = UNet2DConditionModel.from_pretrained(
-            "NightRaven109/CCSRModels",
-            subfolder="stable-diffusion-2-1-base/unet",
-            use_auth_token=os.environ['Read2']
+            os.path.join(model_path, "stable-diffusion-2-1-base/unet")
         )
 
         controlnet = ControlNetModel.from_pretrained(
-            "NightRaven109/CCSRModels",
-            subfolder="Controlnet",
-            use_auth_token=os.environ['Read2']
+            os.path.join(model_path, "Controlnet")
         )
 
         vae = AutoencoderKL.from_pretrained(
-            "NightRaven109/CCSRModels",
-            subfolder="vae",
-            use_auth_token=os.environ['Read2']
+            os.path.join(model_path, "vae")
         )
 
-        # Rest of the code remains the same
         # Freeze models
         for model in [vae, text_encoder, unet, controlnet]:
             model.requires_grad_(False)
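
In short, the commit replaces the per-component from_pretrained calls, each of which authenticated against the Hub with use_auth_token, by a single snapshot_download of the whole NightRaven109/CCSRModels repository, after which every component is loaded from the local snapshot path. Below is a minimal, self-contained sketch of that loading pattern, assuming the same Read2 environment variable holds a read token as in the diff; it illustrates the approach and is not the full app.py.

# Sketch of the snapshot_download + local from_pretrained pattern used in this commit.
import os

from huggingface_hub import snapshot_download
from diffusers import AutoencoderKL, ControlNetModel, DDPMScheduler, UNet2DConditionModel
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

# One authenticated download of the entire repository; the later
# from_pretrained calls read from the local path and need no token.
model_path = snapshot_download(
    repo_id="NightRaven109/CCSRModels",
    token=os.environ["Read2"],  # read token, as in the diff above
)

# Each component is loaded from its subfolder inside the snapshot.
scheduler = DDPMScheduler.from_pretrained(
    os.path.join(model_path, "stable-diffusion-2-1-base/scheduler")
)
tokenizer = CLIPTokenizer.from_pretrained(
    os.path.join(model_path, "stable-diffusion-2-1-base/tokenizer")
)
text_encoder = CLIPTextModel.from_pretrained(
    os.path.join(model_path, "stable-diffusion-2-1-base/text_encoder")
)
feature_extractor = CLIPImageProcessor.from_pretrained(
    os.path.join(model_path, "stable-diffusion-2-1-base/feature_extractor")
)
unet = UNet2DConditionModel.from_pretrained(
    os.path.join(model_path, "stable-diffusion-2-1-base/unet")
)
controlnet = ControlNetModel.from_pretrained(
    os.path.join(model_path, "Controlnet")
)
vae = AutoencoderKL.from_pretrained(os.path.join(model_path, "vae"))

# Inference only: freeze the models so no gradients are tracked.
for model in (vae, text_encoder, unet, controlnet):
    model.requires_grad_(False)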