import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Load the tokenizer and the GPT-2 model fine-tuned on The Fellowship of the Ring,
# registering the special tokens the generation loop expects.
tokenizer = GPT2Tokenizer.from_pretrained('mindwrapped/gpt2-lotr-fellowship', bos_token='<|startoftext|>',
                                          eos_token='<|endoftext|>', pad_token='<|pad|>')
model = GPT2LMHeadModel.from_pretrained('mindwrapped/gpt2-lotr-fellowship')
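
# Optional GPU placement (a hedged sketch, not part of the original Space, which
# runs fine on CPU). transformers already depends on torch, so the import is safe.
# If you enable this, the encoded inputs in generate_text must be moved to the
# same device, e.g. `generated = generated.to(model.device)` before generate():
# import torch
# if torch.cuda.is_available():
#   model = model.to('cuda')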


def generate_text(text, temperature):
  """Sample from the model, feeding each output back in as the prompt,
  until the generated text is longer than 150 characters."""
  out = text
  while True:
    # Prepend the BOS token and encode the running text.
    generated = tokenizer("<|startoftext|> " + out, return_tensors="pt").input_ids
    # Draw one continuation with top-k/top-p sampling at the requested temperature.
    sample_outputs = model.generate(generated, do_sample=True, top_k=50,
                                    max_length=300, top_p=0.95,
                                    temperature=float(temperature),
                                    num_return_sequences=1)
    # num_return_sequences=1, so only the first sequence matters.
    out = tokenizer.decode(sample_outputs[0], skip_special_tokens=True)

    if len(out) > 150:
      return out
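
# Quick sanity check outside the UI (hypothetical usage; uncomment to try
# locally, noting that generation can be slow on CPU):
# print(generate_text('Frodo looked out over the Shire.', 1.0))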


demo = gr.Interface(
  fn=generate_text,
  inputs=[gr.Text(label='Prompt'),
          # Temperature 0 raises a ValueError with do_sample=True, so start at 0.1.
          gr.Slider(minimum=0.1, maximum=5.0, value=1.0, step=0.1, label='Temperature')],
  outputs='text',
  examples=[['', 1.9], ['Frodo and Sam moved quietly through the night.', 2.0], ['Frodo and Sam went to the pub. ', 3.0]],
  title='LOTR Generator',
  description='This space uses a GPT-2 model fine-tuned on "The Fellowship of the Ring" to generate text. Try leaving the prompt empty and experimenting with the temperature.',
  article='![visitor badge](https://visitor-badge.glitch.me/badge?page_id=mindwrapped.gpt2-lotr-fellowship-generator-space)',
  live=False,
)

demo.launch(debug=True)
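
# Note: debug=True keeps the process in the foreground and prints tracebacks,
# which suits Hugging Face Spaces. Outside Spaces you could also pass
# share=True (a standard launch() option) to get a temporary public URL:
# demo.launch(debug=True, share=True)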