prithivMLmods committed
Commit 19675d3 · verified
1 Parent(s): d106fc6

Update app.py

Files changed (1)
  1. app.py +24 -26
app.py CHANGED
@@ -8,39 +8,19 @@ from PIL import Image
 import spaces
 import torch
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+from typing import Tuple # Added this import
 
 DESCRIPTIONx = """## STABLE HAMSTER
-
-Drop your best results in the community: [rb.gy/klkbs7](http://rb.gy/klkbs7)
 """
 
-
-style_list = [
-    {
-        "name": "3840 x 2160",
-        "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
-        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
-    },
-    {
-        "name": "2560 x 1440",
-        "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
-        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
-    },
-    {
-        "name": "3D Model",
-        "prompt": "professional 3d model {prompt}. octane render, highly detailed, volumetric, dramatic lighting",
-        "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
-    },
-]
-
-#User -- Env -- .os -- Mode_Repo
-
+# Use environment variables for flexibility
 MODEL_ID = os.getenv("MODEL_REPO")
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
-BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
+BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1")) # Allow generating multiple images at once
 
+# Determine device and load model outside of function for efficiency
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 pipe = StableDiffusionXLPipeline.from_pretrained(
     MODEL_ID,
@@ -50,16 +30,34 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
 ).to(device)
 pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
-# potential speedup
+# Torch compile for potential speedup (experimental)
 if USE_TORCH_COMPILE:
     pipe.compile()
 
-# CPU offloading for Bigger RAM
+# CPU offloading for larger RAM capacity (experimental)
 if ENABLE_CPU_OFFLOAD:
     pipe.enable_model_cpu_offload()
 
 MAX_SEED = np.iinfo(np.int32).max
 
+style_list = [
+    {
+        "name": "3840 x 2160",
+        "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+    },
+    {
+        "name": "2560 x 1440",
+        "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+    },
+    {
+        "name": "3D Model",
+        "prompt": "professional 3d model {prompt}. octane render, highly detailed, volumetric, dramatic lighting",
+        "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
+    },
+]
+
 styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
 STYLE_NAMES = list(styles.keys())
 DEFAULT_STYLE_NAME = "3840 x 2160"
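
For context, here is a minimal sketch (not part of this commit) of how the pieces defined above — `styles`, `DEFAULT_STYLE_NAME`, `BATCH_SIZE`, `MAX_SEED`, `pipe`, and `device` — are typically wired into a Space's generation function. The `apply_style` and `generate` names are assumptions for illustration, not code from this repo; only documented diffusers/torch/spaces calls are used.

```python
# Hypothetical helpers, assuming the module-level objects defined in app.py above.
import random
from typing import Tuple

def apply_style(style_name: str, prompt: str, negative_prompt: str = "") -> Tuple[str, str]:
    # Fall back to the default style if an unknown name is passed in.
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", prompt), n + negative_prompt

@spaces.GPU  # typical for ZeroGPU Spaces; assumed here since `spaces` is imported
def generate(prompt: str, style_name: str = DEFAULT_STYLE_NAME, seed: int = 0,
             randomize_seed: bool = True, width: int = 1024, height: int = 1024):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    styled_prompt, styled_negative = apply_style(style_name, prompt)
    generator = torch.Generator(device=device).manual_seed(seed)
    images = pipe(
        prompt=styled_prompt,
        negative_prompt=styled_negative,
        width=width,
        height=height,
        num_images_per_prompt=BATCH_SIZE,  # BATCH_SIZE env var -> images per call
        generator=generator,
    ).images
    return images, seed
```

Under these assumptions, the `BATCH_SIZE` environment variable added in this commit maps naturally onto the pipeline's `num_images_per_prompt` argument, which is why it is read once at module load rather than inside the generation function.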