Update app.py
app.py CHANGED
@@ -73,11 +73,10 @@ st.markdown("""
 </style>
 """, unsafe_allow_html=True)
 
-# Load the model
-with
-
-
-st.error("The loaded model is not valid.")
+# Load the model and tokenizer from Hugging Face Hub
+model_name = "your_model_name"  # Replace with the actual model name
+model = AutoModelForCausalLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 def write_top_bar():
     col1, col2, col3 = st.columns([1,10,2])
@@ -110,8 +109,10 @@ def handle_input():
     if len(chat_history) == MAX_HISTORY_LENGTH:
         chat_history = chat_history[:-1]
 
-
-
+    # Generate response using the model
+    inputs = tokenizer.encode(input, return_tensors="pt")
+    outputs = model.generate(inputs)
+    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
     chat_history.append((input, answer))
 
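For reference, here is a minimal, self-contained sketch of the load-and-generate flow this commit introduces. It assumes the transformers library is installed and uses "gpt2" purely as a stand-in for the diff's "your_model_name" placeholder:

# Standalone sketch of the pattern added in this commit.
# Assumption: "gpt2" stands in for the placeholder "your_model_name".
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # stand-in for the diff's "your_model_name" placeholder
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def generate_answer(prompt: str) -> str:
    # Encode the prompt, generate a continuation, and decode it back to text.
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    outputs = model.generate(inputs, max_new_tokens=50)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(generate_answer("Hello, how are you?"))

Note that for a causal LM, decode() returns the prompt followed by the continuation, so the answer stored in chat_history will repeat the user's input; decoding only the tokens past the prompt length (outputs[0][inputs.shape[-1]:]) would yield just the new text. The sketch also names its argument prompt rather than input, since the committed code shadows Python's built-in input().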