Update app.py
app.py CHANGED
@@ -65,7 +65,7 @@ def extract_image(image, text, prob, num=1):
     return fi[0]['image'], fi[0]['score']
 
 title = "ClipnCrop"
-description = "Extract sections of images from your image by using OpenAI's CLIP and Facebooks Detr implemented on HuggingFace Transformers"
+description = "<p style= 'color:white'>Extract sections of images from your image by using OpenAI's CLIP and Facebooks Detr implemented on HuggingFace Transformers, if the similarity score is not so much, then please consider the prediction to be void.</p>"
 examples=[['ex3.jpg', 'black bag', 0.96],['ex2.jpg', 'man in red dress', 0.85]]
 article = "<p style='text-align: center'><a href='https://github.com/Vishnunkumar/clipcrop' target='_blank'>clipcrop</a></p>"
 gr.Interface(fn=extract_image, inputs=[i1, i2, i3], outputs=[o1, o2], title=title, description=description, article=article, examples=examples, enable_queue=True).launch()
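
The hunk only shows the tail of app.py. For orientation, below is a minimal sketch of how the pieces referenced in the diff (extract_image, the i1-i3 inputs, and the o1/o2 outputs) could be wired together with Hugging Face Transformers and the legacy Gradio component API. The model checkpoints, component definitions, and function body here are assumptions for illustration, not the repository's actual code.

```python
# Hypothetical reconstruction of the parts of app.py not shown in the diff.
# DETR proposes candidate object crops; CLIP ranks them against the text query.
import torch
import gradio as gr
from transformers import pipeline, CLIPModel, CLIPProcessor

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def extract_image(image, text, prob, num=1):
    # Crop every region DETR detects above the chosen confidence threshold.
    crops = []
    for det in detector(image):
        if det["score"] >= prob:
            box = det["box"]
            crops.append(image.crop((box["xmin"], box["ymin"], box["xmax"], box["ymax"])))
    # Score each crop against the text prompt with CLIP and keep the best match;
    # the softmax turns the per-crop logits into a 0-1 "similarity" over the crops.
    with torch.no_grad():
        inputs = clip_processor(text=[text], images=crops, return_tensors="pt", padding=True)
        scores = clip_model(**inputs).logits_per_text[0].softmax(dim=0)
    fi = sorted(
        ({"image": c, "score": float(s)} for c, s in zip(crops, scores)),
        key=lambda x: x["score"], reverse=True,
    )
    return fi[0]['image'], fi[0]['score']

# Interface components referenced as i1-i3 and o1/o2 in the diff (names and labels assumed),
# using the pre-3.x gradio namespaces implied by enable_queue=True in gr.Interface.
i1 = gr.inputs.Image(type="pil", label="Image")
i2 = gr.inputs.Textbox(label="What should be cropped out?")
i3 = gr.inputs.Number(default=0.9, label="Detection confidence threshold")
o1 = gr.outputs.Image(type="pil", label="Cropped section")
o2 = gr.outputs.Textbox(label="Similarity score")
```

Under these assumptions the third input (0.96 and 0.85 in the examples) is the DETR detection threshold, and the second output is the CLIP score that the new description asks users to treat as a validity check on the prediction.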