ysharma (HF Staff) committed
Commit aede36b · 1 Parent(s): 9891a9d

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -19,7 +19,7 @@ PLACEHOLDER = """
 """
 
 model_id_llama3 = "xtuner/llava-llama-3-8b-v1_1-transformers"
-model_id_phi3 = "xtuner/llava-llama-3-8b-v1_1-transformers"
+model_id_phi3 = "xtuner/llava-phi-3-mini-hf"
 
 processor = AutoProcessor.from_pretrained(model_id_llama3)
 processor = AutoProcessor.from_pretrained(model_id_phi3)
@@ -167,8 +167,8 @@ with gr.Blocks(fill_height=True, ) as demo:
 
 chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
 
-gr.Examples(examples=[[{"text": "What is on the flower?", "files": ["./bee.png"]}],
-                      [{"text": "How to make this pastry?", "files": ["./baklava.png"]},],]
+gr.Examples(examples=[{"text": "What is on the flower?", "files": ["./bee.png"]},
+                      {"text": "How to make this pastry?", "files": ["./baklava.png"]},]
             inputs=chat_input)
 
 #chat_input.submit(lambda: gr.MultimodalTextbox(interactive=False), None, [chat_input]).then(bot_streaming_llama3, [chat_input, chatbot1,], [chatbot1,])
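
For reference, here is a minimal sketch of what the first hunk sets up after this commit, under the assumption that the two checkpoints are meant to keep separate processors. The names processor_llama3 and processor_phi3 are hypothetical; app.py as diffed assigns both from_pretrained calls to the same processor variable, so the second assignment overwrites the first.

from transformers import AutoProcessor

# Model IDs as they appear after this commit.
model_id_llama3 = "xtuner/llava-llama-3-8b-v1_1-transformers"
model_id_phi3 = "xtuner/llava-phi-3-mini-hf"

# Hypothetical distinct names for illustration only; the committed app.py
# reuses a single `processor` variable for both calls.
processor_llama3 = AutoProcessor.from_pretrained(model_id_llama3)
processor_phi3 = AutoProcessor.from_pretrained(model_id_phi3)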
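
Likewise, a hedged sketch of the updated gr.Examples call from the second hunk, using the flat list of MultimodalTextbox-style dicts this commit introduces. The comma before inputs=chat_input is added here so the call is valid Python; it does not appear in the rendered diff and may be either a display artifact or still missing in the committed file. The image paths ./bee.png and ./baklava.png are assumed to exist in the Space.

import gradio as gr

with gr.Blocks(fill_height=True) as demo:
    # Multimodal input box matching the context line in the diff.
    chat_input = gr.MultimodalTextbox(
        interactive=True,
        file_types=["image"],
        placeholder="Enter message or upload file...",
        show_label=False,
    )
    # Examples in the flat dict format used after this commit.
    gr.Examples(
        examples=[
            {"text": "What is on the flower?", "files": ["./bee.png"]},
            {"text": "How to make this pastry?", "files": ["./baklava.png"]},
        ],
        inputs=chat_input,
    )

demo.launch()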