Spaces: Running on Zero
bugfix
app.py CHANGED
@@ -103,7 +103,6 @@ def upload_image_to_s3(image, account_id, access_key, secret_key, bucket_name):
     buffer = BytesIO()
     image.save(buffer, "PNG")
     buffer.seek(0)
-    print("start to upload")
     s3.upload_fileobj(buffer, bucket_name, image_file)
     print("upload finish", image_file)
     return image_file
@@ -112,16 +111,18 @@ def upload_image_to_s3(image, account_id, access_key, secret_key, bucket_name):
 
 @spaces.GPU(enable_queue=True)
 def process(image, image_url, prompt, n_prompt, num_steps, guidance_scale, control_strength, seed, upload_to_s3, account_id, access_key, secret_key, bucket):
-
+    print("process start")
     if image_url:
+        print(image_url)
         orginal_image = load_image(image_url)
     else:
         orginal_image = Image.fromarray(image)
 
     size = (orginal_image.size[0], orginal_image.size[1])
-    print(size)
+    print("image size", size)
     depth_image = get_depth_map(orginal_image)
     generator = torch.Generator().manual_seed(seed)
+    print(prompt, n_prompt, guidance_scale, num_steps, control_strength, )
     generated_image = pipe(
         prompt=prompt,
         negative_prompt=n_prompt,
@@ -140,7 +141,7 @@ def process(image, image_url, prompt, n_prompt, num_steps, guidance_scale, control_strength, seed, upload_to_s3, account_id, access_key, secret_key, bucket):
     else:
         result = {"status": "success", "message": "Image generated but not uploaded"}
 
-    return [[
+    return [[orginal_image, generated_image], json.dumps(result)]
 
 with gr.Blocks() as demo:
 
@@ -171,7 +172,7 @@ with gr.Blocks() as demo:
 
 
     with gr.Column():
-
+        images = ImageSlider(label="Generate images", type="pil", slider_color="pink")
         logs = gr.Textbox(label="logs")
 
     inputs = [
@@ -198,7 +199,7 @@ with gr.Blocks() as demo:
     ).then(
         fn=process,
         inputs=inputs,
-        outputs=[
+        outputs=[images, logs],
         api_name=False
     )
 
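For context, the heart of this fix is keeping the event's outputs list in step with what process returns: the ImageSlider component takes a (before, after) image pair and the logs Textbox takes a string. The following is a minimal, self-contained sketch of that wiring, not the real app: the component names mirror app.py, but the pipeline is replaced by a placeholder image so the outputs pairing can be run and checked on its own.

# Minimal sketch of the return-value / outputs pairing this commit restores.
# The real Space runs a depth-ControlNet pipeline; here a copied image stands
# in for the generated result so the Gradio wiring itself is runnable.
import json

import gradio as gr
from gradio_imageslider import ImageSlider
from PIL import Image


def process(image):
    orginal_image = Image.fromarray(image)       # same numpy -> PIL conversion as app.py
    generated_image = orginal_image.copy()       # placeholder for the pipe(...) output
    result = {"status": "success", "message": "Image generated but not uploaded"}
    # First item feeds the ImageSlider (a before/after pair),
    # second item feeds the logs Textbox.
    return [[orginal_image, generated_image], json.dumps(result)]


with gr.Blocks() as demo:
    with gr.Column():
        image = gr.Image(label="image")
        run = gr.Button("Generate")
    with gr.Column():
        images = ImageSlider(label="Generate images", type="pil", slider_color="pink")
        logs = gr.Textbox(label="logs")
    # outputs must line up, in order, with the two items process returns.
    run.click(fn=process, inputs=[image], outputs=[images, logs])

if __name__ == "__main__":
    demo.launch()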