#import streamlit as st
#x = st.slider('Select a value')
#st.write(x, 'squared is', x * x)
import streamlit as st
from transformers import pipeline, AutoModelForMaskedLM, AutoTokenizer
st.title("Completamento del testo in Latino con Latin BERT")
st.write("Inserisci un testo con il token [MASK] per vedere le previsioni del modello.")
# Example input: dvces et reges carthaginiensivm hanno et mago qui [MASK] punico bello cornelium consulem aput liparas ceperunt
input_text = st.text_input("Testo:", value="Lorem ipsum dolor sit amet, [MASK] adipiscing elit.")
# Latin BERT checkpoint: alternative local/Hub paths are kept commented out.
#modelname = "./models/latin_bert/"
#modelname = "LuisAVasquez/simple-latin-bert-uncased"
modelname = "./models/bert-base-latin-uncased"
# Load the tokenizer and masked-LM weights, then build a fill-mask pipeline.
tokenizer = AutoTokenizer.from_pretrained(modelname)
model = AutoModelForMaskedLM.from_pretrained(modelname)
fill_mask = pipeline("fill-mask", model=model, tokenizer=tokenizer)
if input_text:
    # Run mask filling on the user's text and display the top predictions.
    predictions = fill_mask(input_text)
    st.subheader("Prediction results with Simple Latin BERT:")
    for pred in predictions:
        st.write(f"**Word**: {pred['token_str']}, **Probability**: {pred['score']:.4f}, **Sequence**: {pred['sequence']}")