Tapanat commited on
Commit
57342ce
·
1 Parent(s): 1103886

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -31
app.py CHANGED
@@ -1,38 +1,24 @@
1
- pip install streamlit transformers pillow
2
  import streamlit as st
3
- from transformers import CLIPProcessor, CLIPModel
4
- from PIL import Image
5
- import torch
6
 
7
- # Load the pre-trained CLIP model and processor
8
- model_name = "dandelin/vilt-b32-finetuned-vqa" # You can choose a different CLIP variant if needed
9
- model = CLIPModel.from_pretrained(model_name)
10
- processor = CLIPProcessor.from_pretrained(model_name)
11
 
12
- st.title("Visual Question Answering App")
13
 
14
- # Input image upload
15
- image = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
16
 
17
- # Input question area for user input
18
- question = st.text_area("Ask a question about the image:")
 
 
 
 
 
 
19
 
20
- if image and question:
21
- # Display the uploaded image
22
- st.image(image, caption="Uploaded Image", use_column_width=True)
23
-
24
- # Process the image and question for VQA
25
- inputs = processor(text=question, images=image, return_tensors="pt")
26
-
27
- # Get the CLIP model's prediction
28
- with torch.no_grad():
29
- outputs = model(**inputs)
30
-
31
- # Extract the textual answer
32
- answer = outputs["text"]
33
-
34
- # Display the answer
35
- st.write("Answer:", answer)
36
-
37
- st.write("Powered by Hugging Face's CLIP model.")
38
 
 
 
1
import streamlit as st
from transformers import BartForConditionalGeneration, BartTokenizer

# Load the pre-trained BART summarization model and its tokenizer once at
# startup (module import time), so they are reused across Streamlit reruns.
model_name = "facebook/bart-large-cnn"
model = BartForConditionalGeneration.from_pretrained(model_name)
tokenizer = BartTokenizer.from_pretrained(model_name)

st.title("Text Summarization App")

# Free-form input area for the text the user wants summarized.
input_text = st.text_area("Enter text to summarize:")

if st.button("Summarize"):
    if input_text:
        # Tokenize the input, truncating to BART's 1024-token context limit.
        inputs = tokenizer(
            input_text,
            return_tensors="pt",
            max_length=1024,
            truncation=True,
        )
        # Beam-search generation bounded to a 40-150 token summary;
        # length_penalty > 1 nudges the model toward longer outputs.
        summary_ids = model.generate(
            inputs["input_ids"],
            max_length=150,
            min_length=40,
            length_penalty=2.0,
            num_beams=4,
            early_stopping=True,
        )
        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
        st.subheader("Summary:")
        st.write(summary)
    else:
        # Fix: previously a click with an empty text area silently did
        # nothing; give the user explicit feedback instead.
        st.warning("Please enter some text to summarize.")

st.write("Powered by Hugging Face's BART model.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24