ktrndy committed
Commit 2be0274 · verified · 1 Parent(s): 384ff0c

Update app.py

Files changed (1)
app.py  +11 -16
app.py CHANGED
@@ -7,37 +7,32 @@ from diffusers import DiffusionPipeline
 import torch
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
+model_id_default = "CompVis/stable-diffusion-v1-4" # Replace to the model you would like to use
 
 if torch.cuda.is_available():
     torch_dtype = torch.float16
 else:
     torch_dtype = torch.float32
 
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
 
 # @spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
+    model_id=model_id_default,
     prompt,
     negative_prompt,
-    seed,
-    randomize_seed,
+    seed=42,
     width,
     height,
-    guidance_scale,
-    num_inference_steps,
+    guidance_scale=7.0,
+    num_inference_steps=20,
     progress=gr.Progress(track_tqdm=True),
-):
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
+):
     generator = torch.Generator().manual_seed(seed)
-
+    pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype)
+    pipe = pipe.to(device)
+
     image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
@@ -47,8 +42,8 @@ def infer(
         height=height,
         generator=generator,
     ).images[0]
-
-    return image, seed
+
+    return image
 
 
 examples = [
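
Note: as committed, the new infer() signature places parameters with defaults (model_id, seed, guidance_scale, num_inference_steps) ahead of parameters without defaults (prompt, negative_prompt, width, height), which Python rejects with a SyntaxError. Below is a minimal sketch of one valid rearrangement; it reuses the names from the diff (model_id_default, torch_dtype, device), but the parameter order, the standalone setup lines, and the pipeline keyword arguments not shown in the diff (guidance_scale, num_inference_steps, width) are editorial assumptions, not part of the commit, and the Gradio inputs list would need to match whatever order is chosen.

    import torch
    import gradio as gr
    from diffusers import DiffusionPipeline

    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
    model_id_default = "CompVis/stable-diffusion-v1-4"

    def infer(
        prompt,
        negative_prompt,
        width,
        height,
        model_id=model_id_default,  # defaults moved after the required parameters
        seed=42,
        guidance_scale=7.0,
        num_inference_steps=20,
        progress=gr.Progress(track_tqdm=True),
    ):
        # Fixed seed -> reproducible output for the same prompt and settings.
        generator = torch.Generator().manual_seed(seed)
        # Loading the pipeline inside infer(), as the commit does, lets the caller
        # switch model_id per request, at the cost of reloading weights every call.
        pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype)
        pipe = pipe.to(device)
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            generator=generator,
        ).images[0]
        return image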