Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -7,11 +7,11 @@ model_path='KhantKyaw/GPT2_chatbot2'
|
|
7 |
tokenizer = GPT2Tokenizer.from_pretrained(model_path)
|
8 |
tokenizer.pad_token = tokenizer.eos_token
|
9 |
model = GPT2LMHeadModel.from_pretrained(model_path)
|
10 |
-
prompt = st.chat_input(placeholder="Say Something!",key=None, max_chars=None, disabled=False, on_submit=None, args=None, kwargs=None)
|
11 |
|
12 |
-
|
|
|
13 |
|
14 |
-
output_sequences = model.generate(
|
15 |
input_ids=input_ids,
|
16 |
max_length=100,
|
17 |
temperature=1.0,
|
@@ -22,8 +22,10 @@ output_sequences = model.generate(
|
|
22 |
pad_token_id=tokenizer.eos_token_id,
|
23 |
)
|
24 |
|
25 |
-
generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
|
|
|
26 |
|
|
|
27 |
if prompt:
|
28 |
with st.chat_message(name="AI",avatar=None):
|
29 |
-
st.write(
|
|
|
# Pull the fine-tuned GPT-2 chatbot weights and its tokenizer from the Hub.
# NOTE(review): `model_path` is defined earlier in the file
# ('KhantKyaw/GPT2_chatbot2') — confirm it is in scope at this point.
model = GPT2LMHeadModel.from_pretrained(model_path)
tokenizer = GPT2Tokenizer.from_pretrained(model_path)
# GPT-2 has no pad token of its own; reuse the EOS token so that
# generate() can pad without raising.
tokenizer.pad_token = tokenizer.eos_token
|
|
10 |
|
11 |
+
def generate_response(input_text):
|
12 |
+
input_ids = tokenizer.encode(input_text, return_tensors='pt')
|
13 |
|
14 |
+
output_sequences = model.generate(
|
15 |
input_ids=input_ids,
|
16 |
max_length=100,
|
17 |
temperature=1.0,
|
|
|
22 |
pad_token_id=tokenizer.eos_token_id,
|
23 |
)
|
24 |
|
25 |
+
generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
|
26 |
+
return generated_text
|
27 |
|
28 |
# Streamlit chat UI: read one user message, then render the model's reply.
prompt = st.chat_input(
    placeholder="Say Something!",
    key=None,
    max_chars=None,
    disabled=False,
    on_submit=None,
    args=None,
    kwargs=None,
)
if prompt:
    # Render inside an "AI" chat bubble; st.chat_input returns None
    # until the user submits, so this branch runs once per message.
    with st.chat_message(name="AI", avatar=None):
        reply = generate_response(prompt)
        st.write(reply)