zeddotes committed
Commit 75aecc2 · 1 Parent(s): 3454c90

updated app with my model

Files changed (1)
  1. app.py +5 -8
app.py CHANGED
@@ -1,27 +1,24 @@
 import gradio as gr
-from transformers import BlipProcessor, BlipForConditionalGeneration
 from PIL import Image
 import torch
+from transformers import BlipProcessor, BlipForConditionalGeneration
 
-# Example with BLIP (replace with your fine-tuned model)
 processor = BlipProcessor.from_pretrained("zeddotes/blip-computer-thoughts")
 model = BlipForConditionalGeneration.from_pretrained("zeddotes/blip-computer-thoughts")
 
 def caption_image(image):
-    if image is None:
-        return "No image provided"
     inputs = processor(images=image, return_tensors="pt")
     with torch.no_grad():
-        out = model.generate(**inputs)
-    caption = processor.decode(out[0], skip_special_tokens=True)
+        generated_ids = model.generate(**inputs)
+    caption = processor.decode(generated_ids[0], skip_special_tokens=True)
     return caption
 
 demo = gr.Interface(
     fn=caption_image,
     inputs=gr.Image(type="pil"),
     outputs="text",
-    title="Custom UI Action Description"
+    title="My Fine-Tuned Model"
 )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
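
For a quick smoke test of the updated handler, something like the sketch below could be run after the definitions in app.py (or in a REPL once they are imported). The blank 224x224 test image is an assumed placeholder; any PIL image works, and the zeddotes/blip-computer-thoughts checkpoint must be reachable on the Hub.

# Minimal smoke test for caption_image (a sketch, not part of the commit).
from PIL import Image

test_image = Image.new("RGB", (224, 224), color="white")  # placeholder input image
print(caption_image(test_image))  # prints the generated caption string

Note that this commit drops the `if image is None` guard, so caption_image now assumes Gradio always hands it a PIL image; submitting with no image would raise inside the processor rather than returning "No image provided".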