---
license: mpl-2.0
language:
- vi
- zh
pipeline_tag: translation
---
|
|
|
## How to run
|
|
|
```python
|
import torch
import time
from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForSeq2SeqLM

# Load the ONNX-exported zh->vi seq2seq model and its tokenizer from the Hub.
model = ORTModelForSeq2SeqLM.from_pretrained("chi-vi/hirashiba-mt-tiny-zh-vi-onnx")
tokenizer = AutoTokenizer.from_pretrained("chi-vi/hirashiba-mt-tiny-zh-vi-onnx")

# One source sentence per line. Strip trailing newlines and skip blank lines
# so the pipeline is not fed "\n" as part of (or instead of) the text.
with open("test.txt", encoding="utf-8") as f:
    text = [line.strip() for line in f if line.strip()]

start_time = time.time()

# device=0 selects the first GPU; use device=-1 (or omit) to run on CPU.
pipe = pipeline("translation", model=model, tokenizer=tokenizer, device=0)
result = pipe(text)
print(result)

print("--- %s seconds ---" % (time.time() - start_time))
|
```