ejschwartz committed
Commit 4954b56 · 1 Parent(s): 374968b
Files changed (1):
  app.py (+27 -9)
app.py CHANGED
@@ -14,7 +14,7 @@ huggingface_hub.login(token=hf_key)
 
 tokenizer = AutoTokenizer.from_pretrained(
     "bigcode/starcoderbase-3b"
-    #, use_auth_token=hf_key
+    # , use_auth_token=hf_key
 )
 vardecoder_model = AutoModelForCausalLM.from_pretrained(
     "ejschwartz/resym-vardecoder", torch_dtype=torch.bfloat16, device_map="auto"
@@ -40,18 +40,36 @@ example = """{
 @spaces.GPU
 def infer(input):
     line = json.loads(input)
-    first_token = line['output'].split(':')[0]
-    prompt = line['input'] + first_token + ':'
+    first_token = line["output"].split(":")[0]
+    prompt = line["input"] + first_token + ":"
 
-    input_ids = tokenizer.encode(prompt, return_tensors='pt').cuda()[:, : 8192 - 1024]
+    input_ids = tokenizer.encode(prompt, return_tensors="pt").cuda()[:, : 8192 - 1024]
     output = vardecoder_model.generate(
-        input_ids=input_ids, max_new_tokens=1024, num_beams=4, num_return_sequences=1, do_sample=False,
-        early_stopping=False, pad_token_id=0, eos_token_id=0
+        input_ids=input_ids,
+        max_new_tokens=1024,
+        num_beams=4,
+        num_return_sequences=1,
+        do_sample=False,
+        early_stopping=False,
+        pad_token_id=0,
+        eos_token_id=0,
     )[0]
-    output = tokenizer.decode(output[input_ids.size(1): ], skip_special_tokens=True, clean_up_tokenization_spaces=True)
+    output = tokenizer.decode(
+        output[input_ids.size(1) :],
+        skip_special_tokens=True,
+        clean_up_tokenization_spaces=True,
+    )
 
-    output = first_token + ':' + output
+    output = first_token + ":" + output
     return output
 
-demo = gr.Interface(fn=infer, inputs=gr.Textbox(lines=10, placeholder=example), outputs=gr.Text())
+
+demo = gr.Interface(
+    fn=infer,
+    inputs=[
+        gr.Text(label="First Token", placeholder="a1"),
+        gr.Textbox(lines=10, placeholder=example),
+    ],
+    outputs=gr.Text(label="Var Decoder Output"),
+)
 demo.launch()
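
A side note on the first hunk: it only reformats the commented-out `use_auth_token` argument (adding a space after the `#`), and the argument can stay commented out because the app already calls `huggingface_hub.login(token=hf_key)`, as the hunk header shows. If the token ever needed to be passed explicitly, a minimal sketch with current `transformers` would use `token=`, since `use_auth_token=` is deprecated:

```python
from transformers import AutoTokenizer

# Sketch only, not part of this commit: recent transformers takes token=
# instead of the deprecated use_auth_token=. Redundant here because
# huggingface_hub.login(token=hf_key) already caches the credential.
tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoderbase-3b", token=hf_key)
```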
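One thing the diff leaves unresolved: the new `gr.Interface` wires two input components to `fn=infer`, but `infer` still takes a single argument, and Gradio passes one positional argument per input component, so submitting the form would likely fail with a `TypeError`. A minimal sketch of a matching handler, assuming the new "First Token" textbox is meant to supply the `first_token` previously parsed out of `line["output"]`:

```python
# Sketch under the assumption above; tokenizer and vardecoder_model are the
# module-level objects loaded earlier in app.py.
@spaces.GPU
def infer(first_token, input):
    line = json.loads(input)
    prompt = line["input"] + first_token + ":"

    # Truncate the prompt so it plus 1024 new tokens fits the 8192 context.
    input_ids = tokenizer.encode(prompt, return_tensors="pt").cuda()[:, : 8192 - 1024]
    output = vardecoder_model.generate(
        input_ids=input_ids,
        max_new_tokens=1024,
        num_beams=4,
        num_return_sequences=1,
        do_sample=False,
        early_stopping=False,
        pad_token_id=0,
        eos_token_id=0,
    )[0]
    output = tokenizer.decode(
        output[input_ids.size(1) :],  # keep only the newly generated tokens
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True,
    )
    return first_token + ":" + output
```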
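For completeness, the textbox payload is parsed as JSON: `infer` reads `line["input"]` (and, before this commit, `line["output"]` to recover the first token). The real `example` placeholder is truncated in this view, so the values below are purely illustrative:

```python
import json

# Hypothetical payload for the sketched two-argument handler; only the
# "input" field name comes from app.py, its value is a placeholder.
payload = json.dumps({"input": "..."})
print(infer("a1", payload))  # "a1" mirrors the First Token placeholder
```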