ford442 committed on
Commit 8b39ab3 · verified · 1 Parent(s): 3b25c9e

Update app.py

Files changed (1): app.py (+24, -22)
app.py CHANGED
@@ -84,6 +84,30 @@ def find_cuda_directories(search_paths=None):
 #import hidet
 #print(dir(hidet))
 #import torch_tensorrt
+import random
+import uuid
+import gradio as gr
+import numpy as np
+from PIL import Image
+
+import diffusers
+from diffusers import AutoencoderKL, StableDiffusionXLPipeline
+from diffusers import EulerAncestralDiscreteScheduler
+from typing import Tuple
+import paramiko
+import datetime
+import cyper
+from image_gen_aux import UpscaleWithModel
+import torch
+#import torch._dynamo
+#torch._dynamo.list_backends()
+import time
+import gc
+
+import torch.nn.functional as F
+from sageattention import sageattn
+
+
 import pythran

 with open("pyt.py", "w") as f:
@@ -116,28 +140,6 @@ os.environ['DISTUTILS_USE_SDK'] = '1' # Force setuptools (distutils) to use the
 subprocess.run(['pythran', './pyt.py', '-DUSE_XSIMD', '-fopenmp', '-march=native'])
 import pyt

-import random
-import uuid
-import gradio as gr
-import numpy as np
-from PIL import Image
-
-import diffusers
-from diffusers import AutoencoderKL, StableDiffusionXLPipeline
-from diffusers import EulerAncestralDiscreteScheduler
-from typing import Tuple
-import paramiko
-import datetime
-import cyper
-from image_gen_aux import UpscaleWithModel
-import torch
-#import torch._dynamo
-#torch._dynamo.list_backends()
-import time
-import gc
-
-import torch.nn.functional as F
-from sageattention import sageattn

 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
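
The context lines in the second hunk show the build-at-startup pattern app.py relies on: it writes pyt.py, compiles it to a native extension with Pythran using the same flags as above, and then imports the compiled module. Below is a minimal, self-contained sketch of that pattern; the exported kernel (double_array) is a hypothetical placeholder, not the contents of the real pyt.py.

import subprocess
import numpy as np

# Pythran only compiles functions declared in an "#pythran export" comment.
source = """#pythran export double_array(float64[])
import numpy as np
def double_array(x):
    return 2.0 * x
"""

with open("pyt.py", "w") as f:
    f.write(source)

# Same flags as in app.py: xsimd vectorization, OpenMP, host-native instructions.
# This produces a pyt.*.so extension module next to pyt.py.
subprocess.run(["pythran", "./pyt.py", "-DUSE_XSIMD", "-fopenmp", "-march=native"], check=True)

import pyt  # the compiled extension takes precedence over the .py source

print(pyt.double_array(np.ones(4)))  # [2. 2. 2. 2.]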
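
The two torch.backends assignments at the end of the hunk pin CUDA matmul behavior to full precision. A short sketch of their effect, assuming a CUDA-enabled PyTorch build:

import torch

# Turn off the TF32 fast path for float32 matmuls and keep bfloat16 matmul
# reductions in full precision on Ampere-and-newer GPUs.
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False

if torch.cuda.is_available():
    a = torch.randn(1024, 1024, device="cuda")
    b = torch.randn(1024, 1024, device="cuda")
    c = a @ b  # runs without the TF32 shortcut under the settings above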