DeepLearning101 committed on
Commit a23d52e · verified · 1 Parent(s): 63cc258

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -14,8 +14,8 @@ auth_token = os.getenv("HF_TOKEN")
 
 # Load the private model
 model_id = "DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser"
-model = AutoModelForSequenceClassification.from_pretrained(model_id, use_auth_token=auth_token)
-tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=auth_token)
+model = AutoModelForSequenceClassification.from_pretrained(model_id, token=auth_token)
+tokenizer = AutoTokenizer.from_pretrained(model_id, token=auth_token)
 
 def transcribe(file_upload, microphone):
     file = microphone if microphone is not None else file_upload
@@ -30,7 +30,7 @@ def transcribe(file_upload, microphone):
     enhanced.export('enhanced.wav', format="wav", bitrate="256k")
 
     # Assume the model is used for text classification
-    inputs = tokenizer(enhanced, return_tensors="pt")
+    inputs = tokenizer("enhanced.wav", return_tensors="pt")
     outputs = model(**inputs)
     predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
 
@@ -57,4 +57,4 @@ demo = gr.Interface(
     ],
 )
 
-demo.launch(debug=True)
+demo.launch(debug=True)
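
The substantive change in this commit swaps the deprecated `use_auth_token` argument for `token` when loading the private model, and passes the exported file path "enhanced.wav" to the tokenizer instead of the pydub object. A minimal, self-contained sketch of the updated loading pattern (assuming a recent transformers release and an HF_TOKEN secret set in the Space, as in the app):

# Minimal sketch of the updated loading pattern, not the full app:
# recent transformers releases accept `token=` in place of the
# deprecated `use_auth_token=` used before this commit.
import os
from transformers import AutoModelForSequenceClassification, AutoTokenizer

auth_token = os.getenv("HF_TOKEN")  # Hugging Face access token from the Space secrets
model_id = "DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser"

model = AutoModelForSequenceClassification.from_pretrained(model_id, token=auth_token)
tokenizer = AutoTokenizer.from_pretrained(model_id, token=auth_token)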