iimmortall committed on
Commit
cece299
·
1 Parent(s): 41a315f

fix pycuda bugs

Browse files
Files changed (2) hide show
  1. app.py +23 -14
  2. requirements.txt +2 -5
app.py CHANGED
@@ -4,21 +4,26 @@ import sys
4
  import datetime
5
  import gradio as gr
6
  import numpy as np
7
- import random
8
  import spaces #[uncomment to use ZeroGPU]
9
- # from diffusers import DiffusionPipeline
10
  import torch
11
  from torchvision.transforms import ToTensor, ToPILImage
12
- import logging
13
- # logging.getLogger("huggingface_hub").setLevel(logging.CRITICAL)
14
- from huggingface_hub import hf_hub_download, snapshot_download
15
 
 
 
 
16
  model_name = "iimmortall/UltraFusion"
17
  auth_token = os.getenv("HF_AUTH_TOKEN")
18
- # greet_file = hf_hub_download(repo_id=model_name, filename="main.py", use_auth_token=auth_token)
19
  model_folder = snapshot_download(repo_id=model_name, token=auth_token, local_dir="/home/user/app")
20
 
21
  from ultrafusion_utils import load_model, run_ultrafusion, check_input
 
 
 
 
 
 
22
 
23
  RUN_TIMES = 0
24
 
@@ -43,14 +48,19 @@ def infer(
43
  ):
44
  print(under_expo_img.size)
45
  print("reciving image")
46
- # print(under_expo_img.orig_name, over_expo_img.orig_name)
47
 
48
- # under_expo_img = under_expo_img.resize([1500, 1000])
49
- # over_expo_img = over_expo_img.resize([1500, 1000])
50
- under_expo_img, over_expo_img = check_input(under_expo_img, over_expo_img, max_l=1500)
 
 
 
 
 
 
 
 
51
 
52
- ue = to_tensor(under_expo_img).unsqueeze(dim=0).to("cuda")
53
- oe = to_tensor(over_expo_img).unsqueeze(dim=0).to("cuda")
54
  print("num_inference_steps:", num_inference_steps)
55
  try:
56
  if num_inference_steps is None:
@@ -59,8 +69,7 @@ def infer(
59
  except Exception as e:
60
  num_inference_steps = 20
61
 
62
- out = run_ultrafusion(ue, oe, 'test', flow_model=flow_model, pipe=ultrafusion_pipe,
63
- steps=num_inference_steps, consistent_start=None)
64
 
65
  out = out.clamp(0, 1).squeeze()
66
  out_pil = to_pil(out)
 
4
  import datetime
5
  import gradio as gr
6
  import numpy as np
7
+ from PIL import Image
8
  import spaces #[uncomment to use ZeroGPU]
 
9
  import torch
10
  from torchvision.transforms import ToTensor, ToPILImage
 
 
 
11
 
12
+
13
+ # -------------------------- HuggingFace -------------------------------
14
+ from huggingface_hub import hf_hub_download, snapshot_download
15
  model_name = "iimmortall/UltraFusion"
16
  auth_token = os.getenv("HF_AUTH_TOKEN")
17
+ greet_file = hf_hub_download(repo_id=model_name, filename="main.py", use_auth_token=auth_token)
18
  model_folder = snapshot_download(repo_id=model_name, token=auth_token, local_dir="/home/user/app")
19
 
20
  from ultrafusion_utils import load_model, run_ultrafusion, check_input
21
+ PYCUDA_FLAG = True
22
+ try :
23
+ import pycuda
24
+ except Exception:
25
+ PYCUDA_FLAG = False
26
+ print("No pycuda!!!")
27
 
28
  RUN_TIMES = 0
29
 
 
48
  ):
49
  print(under_expo_img.size)
50
  print("reciving image")
 
51
 
52
+ under_expo_img_lr, over_expo_img_lr, under_expo_img, over_expo_img, use_bgu = check_input(under_expo_img, over_expo_img, max_l=1500)
53
+
54
+ global PYCUDA_FLAG
55
+ if not PYCUDA_FLAG and use_bgu:
56
+ print("No pycuda, do not run BGU.")
57
+ use_bgu = False
58
+
59
+ ue = to_tensor(under_expo_img_lr).unsqueeze(dim=0).to("cuda")
60
+ oe = to_tensor(over_expo_img_lr).unsqueeze(dim=0).to("cuda")
61
+ ue_hr = to_tensor(under_expo_img).unsqueeze(dim=0).to("cuda")
62
+ oe_hr = to_tensor(over_expo_img).unsqueeze(dim=0).to("cuda")
63
 
 
 
64
  print("num_inference_steps:", num_inference_steps)
65
  try:
66
  if num_inference_steps is None:
 
69
  except Exception as e:
70
  num_inference_steps = 20
71
 
72
+ out = run_ultrafusion(ue, oe, ue_hr, oe_hr, use_bgu, 'test', flow_model=flow_model, pipe=ultrafusion_pipe, steps=num_inference_steps, consistent_start=None, test_bs=16)
 
73
 
74
  out = out.clamp(0, 1).squeeze()
75
  out_pil = to_pil(out)
requirements.txt CHANGED
@@ -2,14 +2,11 @@ accelerate
2
  diffusers
3
  invisible_watermark
4
  transformers
5
- xformers
6
- torch
7
- torchvision==0.19.1
8
  omegaconf
9
  numpy
10
  pillow
11
  einops
12
  scipy
13
- numpy
14
  ftfy
15
- pytorch_lightning==2.4
 
 
2
  diffusers
3
  invisible_watermark
4
  transformers
 
 
 
5
  omegaconf
6
  numpy
7
  pillow
8
  einops
9
  scipy
 
10
  ftfy
11
+ pytorch_lightning==2.4
12
+ # pycuda