SimrusDenuvo committed on
Commit
b5e2b48
·
verified ·
1 Parent(s): 9865594

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -5
app.py CHANGED
@@ -8,19 +8,24 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
10
def generate_response(prompt):
    """Sample one completion for *prompt* from the loaded causal LM and decode it.

    Uses nucleus/top-k sampling with a fixed 200-token overall length cap;
    padding is mapped to the EOS token since GPT-style models have no pad token.
    """
    encoded = tokenizer.encode(prompt, return_tensors="pt")
    sampling_args = dict(
        max_length=200,
        num_return_sequences=1,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.9,
        pad_token_id=tokenizer.eos_token_id,
    )
    sequences = model.generate(encoded, **sampling_args)
    return tokenizer.decode(sequences[0], skip_special_tokens=True)
 
 
24
 
25
  iface = gr.Interface(
26
  fn=generate_response,
 
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
10
def generate_response(prompt):
    """Answer *prompt* briefly using the loaded causal LM.

    The user text is wrapped in a short Russian instruction template,
    up to 100 new tokens are sampled, and only the newly generated
    continuation (without the prompt) is returned.
    """
    # Wrap the user prompt in the instruction template (runtime string kept as-is).
    instruction = f"Ответь кратко и ясно на вопрос:\n{prompt.strip()}\nОтвет:"
    input_ids = tokenizer.encode(instruction, return_tensors="pt")

    output = model.generate(
        input_ids,
        max_new_tokens=100,          # budget for the answer only, prompt excluded
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.9,
        pad_token_id=tokenizer.eos_token_id,  # GPT-style models lack a pad token
        eos_token_id=tokenizer.eos_token_id,
    )

    # Strip the prompt by token position rather than string replacement:
    # decode(prompt_tokens) need not byte-match `instruction` (tokenizer
    # normalization), so `response.replace(instruction, "")` can leak the
    # prompt into the output. Slicing the generated tokens is robust.
    generated = output[0][input_ids.shape[-1]:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()
29
 
30
  iface = gr.Interface(
31
  fn=generate_response,