Spaces · Runtime error

smellslikeml committed · Commit 1cd40cd · 1 parent: 9799832

update app

app.py CHANGED
@@ -1,13 +1,10 @@
 """SpaceLlama3.1 demo gradio app."""
 
-"""SpaceLlama3.1 demo gradio app."""
-
 import datetime
 import logging
 import os
 
 import gradio as gr
-import requests
 import torch
 import PIL.Image
 from prismatic import load
@@ -46,7 +43,7 @@ def compute(image, prompt, model_location):
 
     # Set device and load the model
     device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-    vlm = load(model_location)
+    vlm = load(model_location)  # No need to pass the token again
     vlm.to(device, dtype=torch.bfloat16)
 
     # Prepare prompt
@@ -93,19 +90,19 @@ def create_app():
 
         # Button event handlers
        run.click(
-            compute,
-            [image, prompt, model_location],
-            highlighted_text,
+            fn=compute,
+            inputs=[image, prompt, model_location],
+            outputs=highlighted_text,
         )
-        clear.click(reset, None, [prompt, image])
+        clear.click(fn=reset, inputs=None, outputs=[prompt, image])
 
         # Status
         status = gr.Markdown(f"Startup: {datetime.datetime.now()}")
         gpu_kind = gr.Markdown(f"GPU=?")
         demo.load(
-            lambda: [f"Model `{model_location}` loaded."],
-            None,
-            model_info,
+            fn=lambda: [f"Model `{model_location}` loaded."],
+            inputs=None,
+            outputs=model_info,
         )
 
     return demo
@@ -120,3 +117,4 @@ if __name__ == "__main__":
         logging.info('environ["%s"] = %r', k, v)
 
     create_app().queue().launch()
+
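
The substantive change is that the Gradio event handlers now pass fn, inputs, and outputs as keyword arguments instead of positionally, alongside dropping the unused requests import and the duplicated docstring. A minimal, self-contained sketch of that keyword-argument wiring pattern follows; the echo and reset functions and the component names are illustrative stand-ins, not code from this Space:

import datetime

import gradio as gr


def echo(prompt_text):
    # Illustrative stand-in for the Space's compute().
    return f"You said: {prompt_text}"


def reset():
    # Illustrative stand-in for the Space's reset(): clears the prompt box.
    return ""


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    output = gr.Textbox(label="Output")
    run = gr.Button("Run")
    clear = gr.Button("Clear")

    # Keyword-argument event wiring, as in the commit.
    run.click(fn=echo, inputs=prompt, outputs=output)
    clear.click(fn=reset, inputs=None, outputs=prompt)

    # demo.load fires once per page load; here it just stamps the startup time.
    status = gr.Markdown()
    demo.load(fn=lambda: f"Startup: {datetime.datetime.now()}", inputs=None, outputs=status)

if __name__ == "__main__":
    demo.queue().launch()

Keyword arguments make it explicit which components feed the callback and which receive its result, which is easy to misread in the positional form.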
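
On the model side, compute() loads a prismatic VLM with load(model_location) and moves it to the GPU in bfloat16. A rough sketch of that load-and-generate flow, assuming the prompt-builder and generate interface described in the prismatic-vlms README; the model id, image path, and generation settings below are placeholders, not values taken from this Space:

import torch
import PIL.Image
from prismatic import load

# Placeholder model id; the Space passes its own model_location value.
model_location = "prism-dinosiglip+7b"

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
vlm = load(model_location)
vlm.to(device, dtype=torch.bfloat16)

# Placeholder image and question.
image = PIL.Image.open("example.jpg").convert("RGB")
user_prompt = "What is going on in this image?"

# Build a chat-formatted prompt, then generate a response.
prompt_builder = vlm.get_prompt_builder()
prompt_builder.add_turn(role="human", message=user_prompt)
prompt_text = prompt_builder.get_prompt()

generated_text = vlm.generate(
    image,
    prompt_text,
    do_sample=False,
    max_new_tokens=256,
)
print(generated_text)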