Update app.py
app.py CHANGED
@@ -22,7 +22,6 @@ model = LlavaNextForConditionalGeneration.from_pretrained(
     low_cpu_mem_usage=True,
 )
 model = PeftModel.from_pretrained(model, finetune_repo)
-# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to("cuda:0")
 
 
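The deleted comment was the standard CPU-fallback device pattern; the app now assumes a GPU is present. For reference, a minimal sketch of that fallback (`model` stands for the PEFT-wrapped model loaded above; the `.to(...)` call is left commented since the snippet has no model of its own):

```python
import torch

# Removed pattern: prefer the first CUDA device, fall back to CPU.
# Hard-coding "cuda:0", as app.py now does, fails on CPU-only machines.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# model.to(device)  # `model` is the PeftModel built earlier in app.py
```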
@@ -48,12 +47,6 @@ def predict(image, input_text):
         time.sleep(0.04)
         yield generated_text_without_prompt
 
-
-    # prompt_length = inputs['input_ids'].shape[1]
-    # generate_ids = model.generate(**inputs, max_new_tokens=512)
-    # output_text = processor.batch_decode(generate_ids[:, prompt_length:], skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-    # return output_text
-
 
 image = gr.components.Image(type="pil")
 input_prompt = gr.components.Textbox(label="Input Prompt")
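The removed comments were the earlier one-shot generation path (generate everything, then decode only the tokens after the prompt), superseded by the streaming `yield` that `predict` keeps. A sketch contrasting the two, assuming the standard transformers API and the `model`/`processor`/`inputs` objects app.py builds:

```python
from threading import Thread

from transformers import TextIteratorStreamer


def stream_generate(model, processor, inputs, max_new_tokens=512):
    """Streaming path, the pattern `predict` keeps: decode tokens as they arrive."""
    streamer = TextIteratorStreamer(
        processor.tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    # Run generation on a worker thread; the streamer yields text chunks here.
    Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens),
    ).start()
    text = ""
    for chunk in streamer:
        text += chunk
        yield text


def one_shot_generate(model, processor, inputs, max_new_tokens=512):
    """The deleted commented-out path: block until generation finishes,
    then strip the prompt tokens before decoding."""
    prompt_length = inputs["input_ids"].shape[1]
    generate_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return processor.batch_decode(
        generate_ids[:, prompt_length:],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=False,
    )[0]
```

Streaming lets the Gradio UI show partial output as it is generated, which is presumably why the one-shot variant was dropped rather than revived.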