v0.4
app.py CHANGED
@@ -13,14 +13,14 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"Using {device}" if device != "cpu" else "Using CPU")
 
 def _load_model():
-    tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream2", trust_remote_code=True, revision="2024-05-08")
+    tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream2", trust_remote_code=True, revision="2024-05-08", torch_dtype=(torch.bfloat16 if device == 'cuda' else torch.float32))
     model = AutoModelForCausalLM.from_pretrained("vikhyatk/moondream2", device_map=device, trust_remote_code=True, revision="2024-05-08")
     return (model, tokenizer)
 
 class MoonDream():
     def __init__(self, model=None, tokenizer=None):
         self.model, self.tokenizer = (model, tokenizer)
-        if not model or not tokenizer:
+        if not model or model is None or not tokenizer or tokenizer is None:
             self.model, self.tokenizer = _load_model()
         self.device = device
         self.model.to(self.device)
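A side note on the tokenizer line changed above: in `transformers`, `torch_dtype` is consumed by the model's `from_pretrained`, not the tokenizer's. A minimal sketch of loading the same revision with bfloat16 applied to the model weights (an assumption about the intent, not what this commit does):

# Hedged sketch, not part of the commit: torch_dtype applied to the model load.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16 if device == "cuda" else torch.float32

tokenizer = AutoTokenizer.from_pretrained(
    "vikhyatk/moondream2", trust_remote_code=True, revision="2024-05-08"
)
model = AutoModelForCausalLM.from_pretrained(
    "vikhyatk/moondream2",
    device_map=device,
    trust_remote_code=True,
    revision="2024-05-08",
    torch_dtype=dtype,  # dtype takes effect on the model; tokenizers ignore it
)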
@@ -89,12 +89,13 @@ Complete Description:
     return res.choices[0].text.split("```")[0]
 
 def xform_image_description(img, inst):
-
-
-
-
-
-
+    #md = MoonDream()
+    from together import Together
+    desc = dual_images(img)
+    tog = Together(api_key=os.getenv("TOGETHER_KEY"))
+    prompt=f"""Describe the image in aggressively verbose detail. I must know every freckle upon a man's brow and each blade of the grass intimately.\nDescription: ```text\n{desc}\n```\nInstructions:\n```text\n{inst}\n```\n\n\n---\nDetailed Description:\n```text"""
+    res = tog.completions.create(prompt=prompt, model="meta-llama/Meta-Llama-3-70B", stop=["```"], max_tokens=1024)
+    return res.choices[0].text[len(prompt):].split("```")[0]
 
 with gr.Blocks() as demo:
     with gr.Row(visible=True):
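For reference, a minimal self-contained sketch of the Together completions call that the new `xform_image_description` above relies on (assumes the `together` SDK is installed and `TOGETHER_KEY` is set; the prompt below is a placeholder, not the one from app.py):

# Hedged sketch of the Together completions usage shown in the hunk above.
import os
from together import Together

client = Together(api_key=os.getenv("TOGETHER_KEY"))
prompt = "Describe the scene.\nDescription: ```text\nA dog on a sunlit lawn.\n```\nDetailed Description:\n```text"
res = client.completions.create(
    prompt=prompt,
    model="meta-llama/Meta-Llama-3-70B",
    stop=["```"],       # stop at the closing fence, as in app.py
    max_tokens=1024,
)
print(res.choices[0].text.split("```")[0])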
@@ -110,6 +111,8 @@ with gr.Blocks() as demo:
             im1 = gr.Image(label="image 1", type='pil')
         with gr.Column():
             im2 = gr.Image(label="image 2", type='pil')
+    with gr.Row():
+        minst = gr.Textbox(label="Merge Instructions")
     with gr.Row():
         btn2 = gr.Button("submit batch")
     with gr.Row():
@@ -117,8 +120,6 @@ with gr.Blocks() as demo:
             otp2 = gr.Textbox(label="individual batch output (left)", interactive=True)
         with gr.Column():
             otp3 = gr.Textbox(label="individual batch output (right)", interactive=True)
-    with gr.Row():
-        minst = gr.Textbox(label="Merge Instructions")
     with gr.Row():
         btn_scd = gr.Button("Merge Descriptions to Single Combined Description")
     with gr.Row():
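The layout hunks above only move the "Merge Instructions" textbox ahead of the batch-submit button; the click wiring is not part of this diff. A self-contained Gradio sketch of how such a textbox and button could drive a description function (all names below are stand-ins, not taken from app.py):

# Hedged, self-contained sketch; merge_fn stands in for xform_image_description.
import gradio as gr

def merge_fn(img, inst):
    # Stand-in: app.py would call its Together-backed description function here.
    return f"(description of the image, following instructions: {inst})"

with gr.Blocks() as sketch:
    with gr.Row():
        im = gr.Image(label="image", type="pil")
    with gr.Row():
        minst = gr.Textbox(label="Merge Instructions")
    with gr.Row():
        btn = gr.Button("describe")
    with gr.Row():
        out = gr.Textbox(label="output", interactive=True)
    btn.click(fn=merge_fn, inputs=[im, minst], outputs=[out])

if __name__ == "__main__":
    sketch.launch()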