Soumen committed
Commit af00ce2 · 1 Parent(s): b231e4f

Update app.py

Files changed (1):
  app.py  +1 -1
app.py CHANGED

@@ -55,7 +55,6 @@ def load_models():
     tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
     model = GPT2LMHeadModel.from_pretrained('gpt2-large')
     return tokenizer, model
-tokenizer, model = load_models()
 # Function For Extracting Entities
 @st.experimental_singleton
 def entity_analyzer(my_text):
@@ -103,6 +102,7 @@ def main():
     message = st.text_area("Enter the Text","Type please ..")
     input_ids = tokenizer(message, return_tensors='pt').input_ids
     if st.button("Generate"):
+        tokenizer, model = load_models()
         st.text("Using Hugging Face Trnsformer, Contrastive Search ..")
         output = model.generate(input_ids, max_length=128)
         st.success(tokenizer.decode(output[0], skip_special_tokens=True))
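
In effect, this commit drops the module-level `tokenizer, model = load_models()` call and instead loads the models inside the Generate button handler in `main()`. A minimal sketch of that pattern follows; it assumes `load_models()` is cached with `@st.experimental_singleton` (the diff only shows that decorator on `entity_analyzer`), tokenizes the input after the models are fetched so the handler is self-contained, and passes `penalty_alpha`/`top_k` to `generate()` to actually enable contrastive search (the committed file passes only `max_length`). It illustrates the pattern, not the exact contents of app.py.

import streamlit as st
from transformers import AutoTokenizer, GPT2LMHeadModel

# Assumed cached loader: st.experimental_singleton keeps one copy of the
# gpt2-large weights per server process, so repeated reruns reuse it.
@st.experimental_singleton
def load_models():
    tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
    model = GPT2LMHeadModel.from_pretrained('gpt2-large')
    return tokenizer, model

def main():
    message = st.text_area("Enter the Text", "Type please ..")
    if st.button("Generate"):
        # Models are loaded lazily, only when generation is requested.
        tokenizer, model = load_models()
        input_ids = tokenizer(message, return_tensors='pt').input_ids
        st.text("Using Hugging Face Transformer, Contrastive Search ..")
        # penalty_alpha together with top_k switches transformers' generate()
        # into contrastive search; the committed file uses only max_length.
        output = model.generate(input_ids, penalty_alpha=0.6, top_k=4, max_length=128)
        st.success(tokenizer.decode(output[0], skip_special_tokens=True))

if __name__ == "__main__":
    main()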