fl399 committed
Commit 44a663b · 1 Parent(s): 2c3ce4f

Update app.py

Files changed (1):
  app.py +22 -19
app.py CHANGED
@@ -135,25 +135,28 @@ def evaluate(
 ):
     prompt = _TEMPLATE + "\n" + _add_markup(table) + "\n" + "Q: " + question + "\n" + "A:"
     if llm == "alpaca-lora":
-        inputs = tokenizer(prompt, return_tensors="pt")
-        input_ids = inputs["input_ids"].to(device)
-        generation_config = GenerationConfig(
-            temperature=temperature,
-            top_p=top_p,
-            top_k=top_k,
-            num_beams=num_beams,
-            **kwargs,
-        )
-        with torch.no_grad():
-            generation_output = model.generate(
-                input_ids=input_ids,
-                generation_config=generation_config,
-                return_dict_in_generate=True,
-                output_scores=True,
-                max_new_tokens=max_new_tokens,
-            )
-        s = generation_output.sequences[0]
-        output = tokenizer.decode(s)
+        # inputs = tokenizer(prompt, return_tensors="pt")
+        # input_ids = inputs["input_ids"].to(device)
+        # generation_config = GenerationConfig(
+        #     temperature=temperature,
+        #     top_p=top_p,
+        #     top_k=top_k,
+        #     num_beams=num_beams,
+        #     **kwargs,
+        # )
+        # with torch.no_grad():
+        #     generation_output = model.generate(
+        #         input_ids=input_ids,
+        #         generation_config=generation_config,
+        #         return_dict_in_generate=True,
+        #         output_scores=True,
+        #         max_new_tokens=max_new_tokens,
+        #     )
+        # s = generation_output.sequences[0]
+        # output = tokenizer.decode(s)
+        output = query({
+            "inputs": prompt
+        })
     elif llm == "flan-ul2":
         output = query({
             "inputs": prompt