Update app.py
app.py CHANGED
@@ -9,7 +9,8 @@ from huggingface_hub import HfFolder
 HfFolder.save_token(HF_TOKEN)
 
 import transformers
-from transformers import
+from transformers import VisionEncoderDecoderModel, AutoTokenizer, pipeline, AutoModel
+
 import streamlit as st
 
 # Set Hugging Face API Token if required
@@ -40,8 +41,10 @@ if uploaded_file is not None:
 
 # Load model directly for further analysis or different processing steps
 st.header("Load Model Directly")
+
+# Assuming 'nlpconnect/vit-gpt2-image-captioning' is your model identifier
+model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-model = AutoModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 
 # Example of how you might use model and tokenizer directly
 # This section can be customized based on what you need to do with the model