Emma02 committed on
Commit
0689b54
·
2 Parent(s): de491a7 159c62f

Merge remote-tracking branch 'origin/main'

Browse files
Files changed (1) hide show
  1. app.py +5 -0
app.py CHANGED
@@ -90,6 +90,10 @@ def main(_):
90
  image_list = gr.State([])
91
  gr.Markdown('# LVM Demo')
92
  gr.Markdown(f'Serving model: {FLAGS.checkpoint}')
 
 
 
 
93
  gr.Markdown('## Inputs')
94
  with gr.Row():
95
  upload_drag = gr.File(
@@ -217,6 +221,7 @@ def main(_):
217
  generate_btn: gr.update(interactive=True),
218
  }
219
 
 
220
  for group_name, group_images in example_groups.items():
221
  with gr.Row():
222
  with gr.Column(scale=3):
 
90
  image_list = gr.State([])
91
  gr.Markdown('# LVM Demo')
92
  gr.Markdown(f'Serving model: {FLAGS.checkpoint}')
93
+
94
+ gr.Markdown('**There are mainly two visual prompting: sequential prompting and analogy prompting.**')
95
+ gr.Markdown('**For analogy prompting: describe the task with few-shot examples, which is pairs of (x, y) inputs where x is the input image and y the "annotated" image. And add one query image in the end. Download the few-shot examples dataset at [this link](https://livejohnshopkins-my.sharepoint.com/:f:/g/personal/ybai20_jh_edu/Ei0xiLdFFqJPnwAlFWar29EBUAvB0O3CVaJykZl-f11KDQ?e=Bx9SXZ), and you can simply change the query image in the end for testing.**')
96
+ gr.Markdown('**For sequential prompting, input a sequence of continuous frames and let the model generate the next one. Please refer to the default examples below.**')
97
  gr.Markdown('## Inputs')
98
  with gr.Row():
99
  upload_drag = gr.File(
 
221
  generate_btn: gr.update(interactive=True),
222
  }
223
 
224
+ gr.Markdown('## Default examples')
225
  for group_name, group_images in example_groups.items():
226
  with gr.Row():
227
  with gr.Column(scale=3):