Update app.py
This is the AreaX Space, where users can learn everything about an image they upload. AreaX currently uses the Llama 3.2 11B Vision model.
If you find it helpful, give it a heart!
app.py
CHANGED
@@ -530,7 +530,7 @@ model = MllamaForConditionalGeneration.from_pretrained(ckpt, torch_dtype=torch.b
 processor = AutoProcessor.from_pretrained(ckpt)
 
 @spaces.GPU
-def bot_streaming(message, history, max_new_tokens=250):
+def bot_streaming(message, history, max_new_tokens=1000):
     txt = message["text"]
     ext_buffer = f"{txt}"
 
@@ -586,7 +586,7 @@ def bot_streaming(message, history, max_new_tokens=250):
 # Gradio interface setup
 demo = gr.ChatInterface(
     fn=bot_streaming,
-    title="
+    title="AreaX-Llama3.2-11B-Vision",
     textbox=gr.MultimodalTextbox(),
     additional_inputs=[
         gr.Slider(
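The commit itself only raises the default max_new_tokens from 250 to 1000 and sets a title on the chat interface. For context, below is a minimal sketch of how these pieces typically fit together in a Llama 3.2 Vision Gradio Space; the checkpoint name, image handling, prompt construction, streaming loop, and slider settings are assumptions, and only the parts visible in the diff (bot_streaming, the processor/model setup, and the ChatInterface arguments) come from app.py itself.

# Minimal sketch, not the Space's actual implementation. Pieces not shown in
# the diff (checkpoint name, image handling, streaming loop, slider settings)
# are assumptions.
from threading import Thread

import gradio as gr
import spaces
import torch
from PIL import Image
from transformers import AutoProcessor, MllamaForConditionalGeneration, TextIteratorStreamer

ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"  # assumed checkpoint
model = MllamaForConditionalGeneration.from_pretrained(
    ckpt, torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained(ckpt)

@spaces.GPU
def bot_streaming(message, history, max_new_tokens=1000):
    # gr.MultimodalTextbox passes a dict: {"text": ..., "files": [...]}
    txt = message["text"]
    images = [Image.open(f) for f in message.get("files", [])]

    # Build a chat-template prompt with one image slot per uploaded image
    content = [{"type": "image"} for _ in images] + [{"type": "text", "text": txt}]
    prompt = processor.apply_chat_template(
        [{"role": "user", "content": content}], add_generation_prompt=True
    )
    inputs = processor(
        text=prompt, images=images or None, return_tensors="pt"
    ).to(model.device)

    # Stream tokens back to the chat UI as they are generated
    streamer = TextIteratorStreamer(
        processor.tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens),
    ).start()

    buffer = ""
    for chunk in streamer:
        buffer += chunk
        yield buffer

# Gradio interface setup
demo = gr.ChatInterface(
    fn=bot_streaming,
    title="AreaX-Llama3.2-11B-Vision",
    textbox=gr.MultimodalTextbox(),
    additional_inputs=[
        # Slider range and label are assumed; the diff only shows gr.Slider(
        gr.Slider(minimum=10, maximum=2000, value=1000, label="Max new tokens")
    ],
    multimodal=True,
)
demo.launch()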