ajsbsd committed
Commit 9ebacb1 · verified · Parent(s): 6eed942

Update README.md

Files changed (1):
  1. README.md +22 -250
README.md CHANGED

@@ -1,14 +1,25 @@
  ---
- title: Qwen2.5 1.5B Instruct Gkd Demo
- emoji: 📊
- colorFrom: yellow
- colorTo: red
- sdk: gradio
- sdk_version: 5.34.2
- app_file: app.py
- pinned: false
- license: mit
- short_description: Qwen2.5-1.5B-Instruct-gkd-demo
  ---

  ## ✅ What’s Fixed & Improved
@@ -22,243 +33,4 @@ short_description: Qwen2.5-1.5B-Instruct-gkd-demo
  | ⚙️ **Model Loading Optimization** | Moved model loading into the first request (Hugging Face Spaces friendly) |
  | 🧼 **Code Cleanliness** | Better structure, comments, and readability |

- ---
-
- ## 📦 Final Version of `app.py`
-
- Here is your updated file:
-
- ```python
- import gradio as gr
- import torch
- from transformers import (
-     AutoTokenizer,
-     AutoModelForCausalLM,
-     SpeechT5Processor,
-     SpeechT5ForTextToSpeech,
-     SpeechT5HifiGan,
-     WhisperProcessor,
-     WhisperForConditionalGeneration
- )
- from datasets import load_dataset
- import os
- import spaces
- import tempfile
- import soundfile as sf
- import librosa
-
- # --- Configuration ---
- HUGGINGFACE_MODEL_ID = "HuggingFaceH4/Qwen2.5-1.5B-Instruct-gkd"
- TORCH_DTYPE = torch.bfloat16
- MAX_NEW_TOKENS = 512
- DO_SAMPLE = True
- TEMPERATURE = 0.7
- TOP_K = 50
- TOP_P = 0.95
-
- TTS_MODEL_ID = "microsoft/speecht5_tts"
- TTS_VOCODER_ID = "microsoft/speecht5_hifigan"
- STT_MODEL_ID = "openai/whisper-small"
-
- # --- Global Variables ---
- tokenizer = None
- llm_model = None
- tts_processor = None
- tts_model = None
- tts_vocoder = None
- speaker_embeddings = None
- whisper_processor = None
- whisper_model = None
- first_load = True
-
- # --- Helper: Split Text Into Chunks ---
- def split_text_into_chunks(text, max_chars=400):
-     """Split long replies into sentence-sized chunks that SpeechT5 can handle."""
-     sentences = text.replace("...", ".").split(". ")
-     chunks = []
-     current_chunk = ""
-     for sentence in sentences:
-         if len(current_chunk) + len(sentence) + 2 < max_chars:
-             current_chunk += ". " + sentence if current_chunk else sentence
-         else:
-             chunks.append(current_chunk)
-             current_chunk = sentence
-     if current_chunk:
-         chunks.append(current_chunk)
-     return [f"{chunk}." for chunk in chunks if chunk.strip()]
-
- # --- Load Models Function ---
- @spaces.GPU
- def load_models():
-     global tokenizer, llm_model, tts_processor, tts_model, tts_vocoder, speaker_embeddings, whisper_processor, whisper_model
-     hf_token = os.environ.get("HF_TOKEN")
-
-     # LLM
-     if tokenizer is None or llm_model is None:
-         try:
-             tokenizer = AutoTokenizer.from_pretrained(HUGGINGFACE_MODEL_ID, token=hf_token)
-             if tokenizer.pad_token is None:
-                 tokenizer.pad_token = tokenizer.eos_token
-             llm_model = AutoModelForCausalLM.from_pretrained(
-                 HUGGINGFACE_MODEL_ID,
-                 torch_dtype=TORCH_DTYPE,
-                 device_map="auto",
-                 token=hf_token
-             ).eval()
-             print("LLM loaded successfully.")
-         except Exception as e:
-             print(f"Error loading LLM: {e}")
-
-     # TTS
-     if tts_processor is None or tts_model is None or tts_vocoder is None:
-         try:
-             tts_processor = SpeechT5Processor.from_pretrained(TTS_MODEL_ID, token=hf_token)
-             tts_model = SpeechT5ForTextToSpeech.from_pretrained(TTS_MODEL_ID, token=hf_token)
-             tts_vocoder = SpeechT5HifiGan.from_pretrained(TTS_VOCODER_ID, token=hf_token)
-             embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation", token=hf_token)
-             speaker_embeddings = torch.tensor(embeddings[7306]["xvector"]).unsqueeze(0)
-             device = llm_model.device if llm_model is not None else 'cpu'
-             tts_model.to(device)
-             tts_vocoder.to(device)
-             speaker_embeddings = speaker_embeddings.to(device)
-             print("TTS models loaded.")
-         except Exception as e:
-             print(f"Error loading TTS: {e}")
-
-     # STT
-     if whisper_processor is None or whisper_model is None:
-         try:
-             whisper_processor = WhisperProcessor.from_pretrained(STT_MODEL_ID, token=hf_token)
-             whisper_model = WhisperForConditionalGeneration.from_pretrained(STT_MODEL_ID, token=hf_token)
-             device = llm_model.device if llm_model is not None else 'cpu'
-             whisper_model.to(device)
-             print("Whisper loaded.")
-         except Exception as e:
-             print(f"Error loading Whisper: {e}")
-
- # --- Generate Response and Audio ---
- @spaces.GPU
- def generate_response_and_audio(message, history):
-     # Lazy-load all models on the first request (Spaces-friendly).
-     global first_load
-     if first_load:
-         load_models()
-         first_load = False
-
-     global tokenizer, llm_model, tts_processor, tts_model, tts_vocoder, speaker_embeddings
-
-     if tokenizer is None or llm_model is None:
-         return history + [{"role": "assistant", "content": "Error: LLM not loaded."}], None
-
-     messages = history.copy()
-     messages.append({"role": "user", "content": message})
-
-     try:
-         input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-     except Exception:
-         # Fall back to a plain-text prompt if the tokenizer has no chat template.
-         input_text = ""
-         for item in history:
-             input_text += f"{item['role'].capitalize()}: {item['content']}\n"
-         input_text += f"User: {message}\nAssistant:"
-
-     try:
-         inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True).to(llm_model.device)
-         output_ids = llm_model.generate(
-             inputs["input_ids"],
-             attention_mask=inputs["attention_mask"],
-             max_new_tokens=MAX_NEW_TOKENS,
-             do_sample=DO_SAMPLE,
-             temperature=TEMPERATURE,
-             top_k=TOP_K,
-             top_p=TOP_P,
-             pad_token_id=tokenizer.eos_token_id
-         )
-         generated_text = tokenizer.decode(output_ids[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True).strip()
-     except Exception as e:
-         print(f"LLM error: {e}")
-         return history + [{"role": "assistant", "content": "I had an issue generating a response."}], None
-
-     audio_path = None
-     if None not in [tts_processor, tts_model, tts_vocoder, speaker_embeddings]:
-         try:
-             device = llm_model.device
-             text_chunks = split_text_into_chunks(generated_text)
-
-             full_speech = []
-             for chunk in text_chunks:
-                 tts_inputs = tts_processor(text=chunk, return_tensors="pt", max_length=512, truncation=True).to(device)
-                 speech = tts_model.generate_speech(tts_inputs["input_ids"], speaker_embeddings, vocoder=tts_vocoder)
-                 full_speech.append(speech.cpu())
-
-             full_speech_tensor = torch.cat(full_speech, dim=0)
-
-             with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_file:
-                 audio_path = tmp_file.name
-                 sf.write(audio_path, full_speech_tensor.numpy(), samplerate=16000)
-
-         except Exception as e:
-             print(f"TTS error: {e}")
-
-     # Return messages (which already includes the new user turn) so it shows in the chat.
-     return messages + [{"role": "assistant", "content": generated_text}], audio_path
-
- # --- Transcribe Audio ---
- @spaces.GPU
- def transcribe_audio(filepath):
-     global first_load
-     if first_load:
-         load_models()
-         first_load = False
-
-     global whisper_processor, whisper_model
-     if whisper_model is None:
-         return "Whisper model not loaded."
-
-     try:
-         audio, sr = librosa.load(filepath, sr=16000)
-         inputs = whisper_processor(audio, sampling_rate=sr, return_tensors="pt").input_features.to(whisper_model.device)
-         outputs = whisper_model.generate(inputs)
-         return whisper_processor.batch_decode(outputs, skip_special_tokens=True)[0]
-     except Exception as e:
-         return f"Transcription failed: {e}"
-
- # --- Gradio UI ---
- with gr.Blocks() as demo:
-     gr.Markdown("# Qwen2.5 Chatbot with Voice Input/Output")
-
-     with gr.Tab("Chat"):
-         chatbot = gr.Chatbot(type='messages')
-         text_input = gr.Textbox(placeholder="Type your message...")
-         audio_output = gr.Audio(label="Response Audio", autoplay=True)
-         text_input.submit(generate_response_and_audio, [text_input, chatbot], [chatbot, audio_output])
-
-     with gr.Tab("Transcribe"):
-         audio_input = gr.Audio(type="filepath", label="Upload Audio")
-         transcribed = gr.Textbox(label="Transcription")
-         audio_input.upload(transcribe_audio, audio_input, transcribed)
-
-     clear_btn = gr.Button("Clear All")
-     clear_btn.click(lambda: ([], "", None), None, [chatbot, text_input, audio_output])
-
- demo.queue().launch()
- ```
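A quick, throwaway way to sanity-check the chunking helper defined in the block above (the sample text is made up; `split_text_into_chunks` is assumed to be in scope):

```python
# Throwaway check of the chunking helper above, not part of the app.
sample = "SpeechT5 handles short inputs best. " * 30  # made-up long reply
for i, chunk in enumerate(split_text_into_chunks(sample, max_chars=120)):
    print(i, len(chunk))  # every chunk should land near or under the 120-char cap
```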
-
- ---
-
- ## ✅ Instructions for Uploading to Hugging Face Spaces
-
- 1. **Go to your Space**: https://huggingface.co/spaces/ajsbsd/Qwen2.5-1.5B-Instruct-gkd-demo
- 2. **Pause the Space**: go to `Settings > Runtime` and switch from "Always On" to "Manual"
- 3. **Delete the old `app.py`**
- 4. **Upload this new file** as `app.py`
- 5. **Restart the Space** (a scripted alternative is sketched after this list)
-
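For reference, the same update can be scripted instead of done through the web UI. A minimal sketch using `huggingface_hub` (the repo id comes from the Space URL above; reading a write-scoped token from `HF_TOKEN` is an assumption):

```python
# Sketch: push app.py to the Space programmatically; the new commit triggers
# a rebuild, which restarts the Space runtime. Assumes HF_TOKEN is set.
import os
from huggingface_hub import HfApi

api = HfApi(token=os.environ["HF_TOKEN"])
api.upload_file(
    path_or_fileobj="app.py",                         # local file to push
    path_in_repo="app.py",                            # destination path in the Space repo
    repo_id="ajsbsd/Qwen2.5-1.5B-Instruct-gkd-demo",  # from the Space URL above
    repo_type="space",
    commit_message="Update app.py",
)
```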
- ---
-
- ## 🧩 Optional Enhancements
-
- Would you like me to help you with any of the following?
-
- - Add **status indicators** during model loading or generation
- - Allow **microphone input** directly in the chat tab
- - Use `gr.State()` to store chat history more efficiently
- - Package the models into a custom repo for faster load times
-
- Just let me know what you'd like next!
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
  ---
+ title: Qwen2.5 1.5B Instruct Gkd Demo
+ emoji: 📊
+ colorFrom: yellow
+ colorTo: red
+ sdk: gradio
+ sdk_version: 5.34.2
+ app_file: app.py
+ pinned: false
+ license: mit
+ short_description: Qwen2.5-1.5B-Instruct-gkd-demo
+ ---
+
+ # Qwen2.5 1.5B Instruct Gkd Demo
+
+ A voice-enabled chatbot powered by:
+ - `Qwen2.5-1.5B-Instruct-gkd` for language generation
+ - `SpeechT5` for text-to-speech
+ - `Whisper-small` for speech-to-text
+
+ Try chatting, listen to the audio response, or upload an audio file for transcription.
+
  ---

  ## ✅ What’s Fixed & Improved

  | ⚙️ **Model Loading Optimization** | Moved model loading into the first request (Hugging Face Spaces friendly) |
  | 🧼 **Code Cleanliness** | Better structure, comments, and readability |

+ ---
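The "Model Loading Optimization" row above refers to deferring model construction until the first request, as `app.py` does with its `first_load` flag. A minimal, illustrative sketch of that pattern (not the app's exact code):

```python
# Minimal sketch of load-on-first-request; app.py implements the real version.
from transformers import AutoModelForCausalLM, AutoTokenizer

_model = None
_tokenizer = None

def get_model():
    """Load lazily so the Space can come up before the weights finish downloading."""
    global _model, _tokenizer
    if _model is None:  # heavy download/initialization deferred to the first call
        _tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/Qwen2.5-1.5B-Instruct-gkd")
        _model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/Qwen2.5-1.5B-Instruct-gkd")
    return _model, _tokenizer
```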