gdnartea committed on
Commit
98783be
·
verified ·
1 Parent(s): a442451

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -4
app.py CHANGED
@@ -98,17 +98,24 @@ start = {"role": "system", "content": "You are a helpful digital assistant. Plea
98
 
99
 
100
  def generate_response(user_input):
101
- messages = [start, {"role": "user", "content": user_input}]
102
- inputs = proc_tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
 
 
 
 
 
 
 
103
  with torch.no_grad():
104
  outputs = proc_model.generate(
105
  inputs,
106
- max_new_tokens=48,
107
  )
108
 
109
  response = proc_tokenizer.batch_decode(
110
  outputs,
111
- skip_special_tokens=True,
112
  clean_up_tokenization_spaces=False,
113
  )[0]
114
 
@@ -118,6 +125,7 @@ def CanaryPhi(audio_filepath):
118
  user_input = transcribe(audio_filepath)
119
  print(user_input)
120
  response = generate_response(user_input)
 
121
  return response
122
 
123
 
 
98
 
99
 
100
  def generate_response(user_input):
101
+ messages = [start, {"role": "user", "content": user_input}]
102
+ inputs = proc_tokenizer.apply_chat_template(
103
+ messages,
104
+ add_generation_prompt=True,
105
+ return_tensors="pt",
106
+ )
107
+
108
+
109
+
110
  with torch.no_grad():
111
  outputs = proc_model.generate(
112
  inputs,
113
+ max_new_tokens=32,
114
  )
115
 
116
  response = proc_tokenizer.batch_decode(
117
  outputs,
118
+ #skip_special_tokens=True,
119
  clean_up_tokenization_spaces=False,
120
  )[0]
121
 
 
125
  user_input = transcribe(audio_filepath)
126
  print(user_input)
127
  response = generate_response(user_input)
128
+ print(response)
129
  return response
130
 
131