vinayakdev committed on
Commit
e51bc2f
·
1 Parent(s): 1aa8621

use pipelines in generator

Browse files
Files changed (1) hide show
  1. generator.py +7 -6
generator.py CHANGED
@@ -43,7 +43,7 @@ def load_model():
43
  return hfm, hft, tok, model
44
 
45
  hfmodel, hftokenizer, tok, model = load_model()
46
-
47
  def run_model(input_string, **generator_args):
48
  generator_args = {
49
  "max_length": 256,
@@ -53,11 +53,12 @@ def run_model(input_string, **generator_args):
53
  "early_stopping": False,
54
  }
55
  # tokenizer = att.from_pretrained("ThomasSimonini/t5-end2end-question-generation")
56
- input_string = "generate questions: " + input_string + " </s>"
57
- input_ids = hftokenizer.encode(input_string, return_tensors="pt")
58
- res = hfmodel.generate(input_ids, **generator_args)
59
- output = hftokenizer.batch_decode(res, skip_special_tokens=True)
60
- output = [item.split("<sep>") for item in output]
 
61
  return output
62
 
63
 
 
43
  return hfm, hft, tok, model
44
 
45
  hfmodel, hftokenizer, tok, model = load_model()
46
+ nlp = pipeline("e2e-qg")
47
  def run_model(input_string, **generator_args):
48
  generator_args = {
49
  "max_length": 256,
 
53
  "early_stopping": False,
54
  }
55
  # tokenizer = att.from_pretrained("ThomasSimonini/t5-end2end-question-generation")
56
+ output = nlp(input_string)
57
+ # input_string = "generate questions: " + input_string + " </s>"
58
+ # input_ids = hftokenizer.encode(input_string, return_tensors="pt")
59
+ # res = hfmodel.generate(input_ids, **generator_args)
60
+ # output = hftokenizer.batch_decode(res, skip_special_tokens=True)
61
+ # output = [item.split("<sep>") for item in output]
62
  return output
63
 
64