Update app.py
app.py CHANGED

@@ -9,7 +9,7 @@ HISTORY_WEIGHT = 100 # set history weight (if found any keyword from history, it
 
 @st.cache(allow_output_mutation=True)
 def get_model(model):
-    return pipeline("fill-mask", model=model, top_k=
+    return pipeline("fill-mask", model=model, top_k=20) # set the maximum of tokens to be retrieved after each inference to model
 
 def main(nlp, semantic_model):
     data_load_state = st.text('Inference to model...')
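For context, a minimal sketch of what the cached loader looks like after this hunk. The imports, the trailing usage line, and the checkpoint name passed to get_model are assumptions, since the rest of app.py is not shown in this diff:

```python
import streamlit as st
from transformers import pipeline

@st.cache(allow_output_mutation=True)  # cache the loaded pipeline across Streamlit reruns
def get_model(model):
    # top_k=20 caps how many candidate tokens each fill-mask call returns
    return pipeline("fill-mask", model=model, top_k=20)

# hypothetical usage -- the Space may load a different checkpoint
nlp = get_model("bert-base-uncased")
```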
@@ -40,8 +40,13 @@ def main(nlp, semantic_model):
 
 if __name__ == '__main__':
     if st._is_running_with_streamlit:
-        st.
-
+        st.markdown("""
+        # Introduction
+        This is an example of an auto-complete approach where the next token suggested based on users's history Keyword match & Semantic similarity of users's history (log).
+        The next token is predicted per probability and a weight if it is appeared in keyword user's history or there is a similarity to semantic user's history
+
+        """)
+        history_keyword_text = st.text_input("Enter users's history <keywords match> (optional, i.e., 'Gates')", value="")
 
         text = st.text_input("Enter a text for auto completion...", value='Where is Bill')
 
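A rough sketch of the entry point as it stands after the second hunk; the wording of the intro text is lightly paraphrased here, and everything outside the hunk (model loading, the call into main) is omitted:

```python
import streamlit as st

if __name__ == '__main__':
    if st._is_running_with_streamlit:
        # intro shown above the input boxes
        st.markdown("""
        # Introduction
        An auto-complete example where the next token is suggested using the user's
        history: keyword match plus semantic similarity against the user's log.
        Candidates are ranked by model probability plus a weight when they match the
        keyword history or are semantically similar to it.
        """)
        # optional keyword history used for the keyword-match boost
        history_keyword_text = st.text_input(
            "Enter users's history <keywords match> (optional, i.e., 'Gates')", value="")
        text = st.text_input("Enter a text for auto completion...", value='Where is Bill')
        # the rest of the script (loading the models and calling main(nlp, semantic_model))
        # sits outside this hunk
```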