Tonic committed on
Commit 14d6653 · 1 Parent(s): abd9f13

Update app.py

Files changed (1)
  1. app.py +9 -7
app.py CHANGED
@@ -15,17 +15,19 @@ model.eval()
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)

-def generate_text(prompt):
-    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
-    tokens = model.generate(input_ids, max_length=22)
-    return tokenizer.decode(tokens[0].tolist(), skip_special_tokens=False)
+def generate_text(question):
+    prompt = f'Q: {question}\nA: '
+    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
+    tokens = model.generate(input_ids, max_length=50, pad_token_id=tokenizer.eos_token_id)
+    response = tokenizer.decode(tokens[0], skip_special_tokens=True)
+    return response.split('\nA: ')[-1]

 iface = gr.Interface(
+    gr.Markdown(title),
+    gr.Markdown(description),
     fn=generate_text,
     inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
-    outputs="text",
-    title=title,
-    description=description
+    outputs="text"
 )

 iface.launch()
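
For reference, a minimal sketch of how the whole updated app.py might fit together, assuming the model and tokenizer are loaded with transformers earlier in the file (the diff starts at line 15). The checkpoint name, title, and description strings below are placeholders, not values from this commit; the sketch also passes the title and description through gr.Interface's own keyword arguments, since the interface's first positional parameter is fn.

# Minimal, self-contained sketch of the updated app.py (assumed layout).
# The checkpoint name, title, and description are placeholders, not
# values taken from this commit.
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder: the real checkpoint is set earlier in app.py
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

title = "Text generation demo"                   # placeholder
description = "Ask a question, get an answer."   # placeholder

def generate_text(question):
    # Wrap the user question in a Q/A prompt, as in the updated function.
    prompt = f"Q: {question}\nA: "
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    tokens = model.generate(input_ids, max_length=50, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(tokens[0], skip_special_tokens=True)
    # Keep only the text after the answer marker.
    return response.split("\nA: ")[-1]

iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title=title,
    description=description,
)

iface.launch()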