skypro1111 committed
Commit d329075 · verified · Parent: 9e3acf7

Update README.md

Files changed (1): README.md (+162, -0)
README.md CHANGED

@@ -108,6 +108,168 @@ output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
  print(output_text)
  ```

+ ## ONNX usage
+ 
+ Create a clean environment, fetch the ready-made inference script from this repository, install the dependencies, and run it:
+ 
+ ```bash
+ poetry new verbalizer
+ rm -rf verbalizer/tests/ verbalizer/verbalizer/ verbalizer/README.md
+ cd verbalizer/
+ poetry shell
+ wget https://huggingface.co/skypro1111/mbart-large-50-verbalization/resolve/main/onnx/infer_onnx_hf.py
+ poetry add transformers huggingface_hub onnxruntime-gpu torch
+ python infer_onnx_hf.py
+ ```
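+ 
+ The script falls back to CPU when `CUDAExecutionProvider` is unavailable and prints which provider it picked. To sanity-check your GPU setup before running the full script:
+ 
+ ```python
+ import onnxruntime
+ 
+ # On a working onnxruntime-gpu install this should list CUDAExecutionProvider
+ # ahead of CPUExecutionProvider.
+ print(onnxruntime.get_available_providers())
+ ```
+ 
+ The full source of `infer_onnx_hf.py` follows: it downloads the exported encoder and decoder, builds ONNX Runtime sessions, and greedily decodes a few Ukrainian test sentences.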
+ 
+ ```python
+ import onnxruntime
+ import numpy as np
+ from transformers import AutoTokenizer
+ import time
+ import os
+ from huggingface_hub import hf_hub_download
+ 
+ model_name = "skypro1111/mbart-large-50-verbalization"
+ 
+ 
+ def download_model_from_hf(repo_id=model_name, model_dir="onnx_hf"):
+     """Download the ONNX models from the HuggingFace Hub and return their local paths."""
+     os.makedirs(model_dir, exist_ok=True)
+ 
+     # decoder_model.onnx_data holds the decoder's external weights; it is never
+     # opened directly, but it must sit next to decoder_model.onnx on disk.
+     files = ["onnx/encoder_model.onnx", "onnx/decoder_model.onnx", "onnx/decoder_model.onnx_data"]
+ 
+     # hf_hub_download returns the local filesystem path of each file; return
+     # those (not the repo-relative names) so the sessions can actually load them.
+     return [
+         hf_hub_download(repo_id=repo_id, filename=file, local_dir=model_dir)
+         for file in files
+     ]
+ 
+ def create_onnx_session(model_path, use_gpu=True):
+     """Create an ONNX inference session."""
+     # Session options
+     session_options = onnxruntime.SessionOptions()
+     session_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+     session_options.enable_mem_pattern = True
+     session_options.enable_mem_reuse = True
+     session_options.intra_op_num_threads = 8
+     session_options.log_severity_level = 1
+ 
+     cuda_provider_options = {
+         'device_id': 0,
+         'arena_extend_strategy': 'kSameAsRequested',
+         'gpu_mem_limit': 0,  # 0 means no limit
+         'cudnn_conv_algo_search': 'DEFAULT',
+         'do_copy_in_default_stream': True,
+     }
+ 
+     print(f"Available providers: {onnxruntime.get_available_providers()}")
+     if use_gpu and 'CUDAExecutionProvider' in onnxruntime.get_available_providers():
+         providers = [('CUDAExecutionProvider', cuda_provider_options)]
+         print("Using CUDA for inference")
+     else:
+         providers = ['CPUExecutionProvider']
+         print("Using CPU for inference")
+ 
+     session = onnxruntime.InferenceSession(
+         model_path,
+         providers=providers,
+         sess_options=session_options,
+     )
+ 
+     return session
+ 
+ def generate_text(text, tokenizer, encoder_session, decoder_session, max_length=128):
+     """Generate text for a single input with greedy decoding."""
+     # Prepare input
+     inputs = tokenizer(text, return_tensors="np", padding=True, truncation=True, max_length=512)
+     input_ids = inputs["input_ids"].astype(np.int64)
+     attention_mask = inputs["attention_mask"].astype(np.int64)
+ 
+     # Run the encoder once; its hidden states are reused at every decoding step
+     encoder_outputs = encoder_session.run(
+         output_names=["last_hidden_state"],
+         input_feed={
+             "input_ids": input_ids,
+             "attention_mask": attention_mask,
+         }
+     )[0]
+ 
+     # Initialize decoder input
+     decoder_input_ids = np.array([[tokenizer.pad_token_id]], dtype=np.int64)
+ 
+     # Generate sequence greedily, one token at a time
+     for _ in range(max_length):
+         # Run decoder
+         decoder_outputs = decoder_session.run(
+             output_names=["logits"],
+             input_feed={
+                 "input_ids": decoder_input_ids,
+                 "encoder_hidden_states": encoder_outputs,
+                 "encoder_attention_mask": attention_mask,
+             }
+         )[0]
+ 
+         # Get next token from the logits of the last position
+         next_token = decoder_outputs[:, -1:].argmax(axis=-1)
+         decoder_input_ids = np.concatenate([decoder_input_ids, next_token], axis=-1)
+ 
+         # Stop once an end-of-sequence token has been emitted
+         if tokenizer.eos_token_id in decoder_input_ids[0]:
+             break
+ 
+     # Decode sequence, dropping pad/eos and other special tokens
+     output_text = tokenizer.decode(decoder_input_ids[0], skip_special_tokens=True)
+     return output_text
+ 
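+ # Note: generate_text has no key/value cache, so each decoding step re-runs the
+ # decoder over the entire generated prefix. That is acceptable for the short
+ # outputs this model targets; a higher-level alternative is sketched after
+ # this listing.
+ 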
+ def main():
+     # Print available providers
+     print("Available providers:", onnxruntime.get_available_providers())
+ 
+     # Download models from HuggingFace
+     print("\nDownloading models from HuggingFace...")
+     encoder_path, decoder_path, _ = download_model_from_hf()
+ 
+     # Load tokenizer
+     print("\nLoading tokenizer...")
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     tokenizer.src_lang = "uk_UA"
+     tokenizer.tgt_lang = "uk_UA"
+ 
+     # Create ONNX sessions
+     print("\nLoading encoder...")
+     encoder_session = create_onnx_session(encoder_path)
+     print("\nLoading decoder...")
+     decoder_session = create_onnx_session(decoder_path)
+ 
+     # Test examples
+     test_inputs = [
+         "мій телефон 0979456822",
+         "квартира площею 11 тис кв м.",
+         "Пропонували хабар у 1 млрд грн.",
+         "1 2 3 4 5 6 7 8 9 10.",
+         "Крім того, парламентарій володіє шістьма ділянками землі (дві площею 25000 кв м, дві по 15000 кв м та дві по 10000 кв м) розташованими в Сосновій Балці Луганської області.",
+         "Підписуючи цей документ у 2003 році, голови Росії та України мали намір зміцнити співпрацю та сприяти розширенню двосторонніх відносин.",
+         "Очікується, що цей застосунок буде запущено 22.08.2025.",
+         "За інформацією від Державної служби з надзвичайних ситуацій станом на 7 ранку 15 липня.",
+     ]
+ 
+     # Warm-up run so the first timed example does not pay one-off init costs
+     print("\nWarming up...")
+     _ = generate_text(test_inputs[0], tokenizer, encoder_session, decoder_session)
+ 
+     print("\nRunning inference...")
+     for text in test_inputs:
+         print(f"\nInput: {text}")
+         t = time.time()
+         output = generate_text(text, tokenizer, encoder_session, decoder_session)
+         print(f"Output: {output}")
+         print(f"Time: {time.time() - t:.2f} seconds")
+ 
+ if __name__ == "__main__":
+     main()
+ ```
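+ 
+ As a higher-level alternative to driving the encoder and decoder sessions by hand, the Hugging Face Optimum library can load seq2seq ONNX exports behind the usual `generate()` API. A minimal sketch, assuming `optimum[onnxruntime]` is installed and that Optimum accepts this repo's `onnx/` subfolder layout as-is (not verified against this export):
+ 
+ ```python
+ from optimum.onnxruntime import ORTModelForSeq2SeqLM
+ from transformers import AutoTokenizer
+ 
+ model_name = "skypro1111/mbart-large-50-verbalization"
+ 
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ tokenizer.src_lang = "uk_UA"
+ 
+ # use_cache=False: only plain encoder/decoder exports are referenced above,
+ # no decoder_with_past model.
+ model = ORTModelForSeq2SeqLM.from_pretrained(model_name, subfolder="onnx", use_cache=False)
+ 
+ inputs = tokenizer("мій телефон 0979456822", return_tensors="pt")
+ output_ids = model.generate(
+     **inputs,
+     max_length=128,
+     # mBART-50 conventionally forces the target-language code as the first token
+     forced_bos_token_id=tokenizer.convert_tokens_to_ids("uk_UA"),
+ )
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
+ ```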
+ 
  ## Performance
  Evaluation metrics were not explicitly used for this model. Its performance is primarily demonstrated through its application in enhancing the naturalness of TTS outputs.