KhantKyaw committed on
Commit de171eb · verified · 1 Parent(s): 64c6e7a

Update app.py

Files changed (1)
app.py +1 -3
app.py CHANGED
@@ -10,7 +10,6 @@ model = GPT2LMHeadModel.from_pretrained(model_path)
 prompt = st.chat_input(placeholder="Say Something!",key=None, max_chars=None, disabled=False, on_submit=None, args=None, kwargs=None)
 
 input_ids = tokenizer.encode(prompt, return_tensors='pt')
-print(input_ids)
 
 output_sequences = model.generate(
     input_ids=input_ids,
@@ -22,8 +21,7 @@ output_sequences = model.generate(
     do_sample=True,
     pad_token_id=tokenizer.eos_token_id,
 )
-
-print(output_sequences)
+
 generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
 
 if prompt:
 
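For context, below is a minimal sketch of how app.py might fit together after this commit. The model_path value, the max_length setting, the st.write() output call, and placing the encode/generate/decode steps inside the if prompt: guard are assumptions made so the example is self-contained and runnable; only the lines visible in the diff above are confirmed by the commit.

# Minimal sketch of app.py after this commit (assumptions noted inline).
import streamlit as st
from transformers import GPT2Tokenizer, GPT2LMHeadModel

model_path = "gpt2"  # assumed placeholder; the real model_path value is not shown in the diff
tokenizer = GPT2Tokenizer.from_pretrained(model_path)
model = GPT2LMHeadModel.from_pretrained(model_path)

# Chat input box; returns None until the user submits a message.
prompt = st.chat_input(placeholder="Say Something!")

if prompt:  # guard so encoding/generation only run on a submitted prompt (assumption)
    # Tokenize the user prompt into input IDs for GPT-2.
    input_ids = tokenizer.encode(prompt, return_tensors='pt')

    # Sample a continuation; max_length is an assumed setting, while
    # do_sample and pad_token_id come from the visible diff lines.
    output_sequences = model.generate(
        input_ids=input_ids,
        max_length=100,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode the generated token IDs back into text.
    generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)

    st.write(generated_text)  # assumed display call; not part of the diff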