import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# Load your pre-trained model and tokenizer
model = AutoModelForSeq2SeqLM.from_pretrained("Vuks/sanchit_whisper")
tokenizer = AutoTokenizer.from_pretrained("Vuks/sanchit_whisper")
def handle(request, context):
    # Tokenize the incoming text payload and run a single forward pass
    inputs = tokenizer(request["inputs"], return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs
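# --- Example invocation (illustrative sketch only) ---
# Assumes the request payload is a dict with an "inputs" key holding text,
# and that the second `context` argument is unused by this handler.
if __name__ == "__main__":
    sample_request = {"inputs": "Hello, world"}
    result = handle(sample_request, context=None)
    print(result)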