NeerAbhy committed on
Commit a70efa9 · verified · 1 Parent(s): 2445a44

Update app.py

Files changed (1)
  1. app.py +4 -5
app.py CHANGED
@@ -125,9 +125,7 @@ lang_id = {
     "Chinese": "zh",
     "Zulu": "zu",
 }
-def translation_text(source_lang, target_lang, user_input):
-    src_lang = lang_id[source_lang]
-    trg_lang = lang_id[target_lang]
+def translation_text(src_lang, trg_lang, user_input):
     tokenizer.src_lang = src_lang
     with torch.no_grad():
         encoded_input = tokenizer(user_input, return_tensors="pt").to(device)
@@ -148,13 +146,14 @@ with demo:
     text2 = gr.Textbox()
     source_lang = gr.Dropdown(label="Source lang", choices=list(lang_id.keys()), value=list(lang_id.keys())[0])
     target_lang = gr.Dropdown(label="target lang", choices=list(lang_id.keys()), value=list(lang_id.keys())[0])
-
+    src_lang = lang_id[source_lang]
+    trg_lang = lang_id[target_lang]
     #gr.Examples(examples = list(lang_id.keys()),
     #            inputs=[
     #            source_lang])
     b1 = gr.Button("convert to text")
     b3 = gr.Button("translate")
-    b3.click(translation_text, inputs = [source_lang, target_lang, text0], outputs = text2)
+    b3.click(translation_text, inputs = (src_lang, trg_lang, text0), outputs = text2)
     b1.click(audio_a_text, inputs=audio, outputs=text)
 
     b2 = gr.Button("Classification of speech")
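
For context, a minimal, self-contained sketch of the Gradio wiring this hunk touches is shown below, assuming an M2M100-style checkpoint; the "facebook/m2m100_418M" name, the `model` variable, and the textbox labels are illustrative assumptions and are not taken from this commit. Gradio's Button.click passes each Dropdown's currently selected string to the handler at click time, so in this sketch the lang_id lookup stays inside translation_text and the Dropdown components themselves are listed in `inputs`.

# Sketch only, not the repository's full app.py. Model/tokenizer names are assumptions.
import torch
import gradio as gr
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(device)
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

# Trimmed-down version of the language map from app.py.
lang_id = {"English": "en", "Spanish": "es", "Chinese": "zh", "Zulu": "zu"}

def translation_text(source_lang, target_lang, user_input):
    # Map the human-readable dropdown choices to model language codes.
    src_lang = lang_id[source_lang]
    trg_lang = lang_id[target_lang]
    tokenizer.src_lang = src_lang
    with torch.no_grad():
        encoded_input = tokenizer(user_input, return_tensors="pt").to(device)
        generated = model.generate(
            **encoded_input,
            forced_bos_token_id=tokenizer.get_lang_id(trg_lang),
        )
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]

with gr.Blocks() as demo:
    text0 = gr.Textbox(label="Text to translate")
    text2 = gr.Textbox(label="Translation")
    source_lang = gr.Dropdown(label="Source lang", choices=list(lang_id.keys()), value="English")
    target_lang = gr.Dropdown(label="target lang", choices=list(lang_id.keys()), value="English")
    b3 = gr.Button("translate")
    # The Dropdown components go in `inputs`; their current string values are
    # handed to translation_text when the button is clicked.
    b3.click(translation_text, inputs=[source_lang, target_lang, text0], outputs=text2)

demo.launch()

Passing the components (rather than values computed once at build time) keeps the language lookup in sync with whatever the user has selected when they press "translate".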