neelimapreeti297 committed on
Commit
3b60890
·
verified ·
1 Parent(s): 469d81e

Upload translator_app.py

Browse files
Files changed (1) hide show
  1. translator_app.py +42 -0
translator_app.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from torchtext.data.utils import get_tokenizer
4
+ from torchtext.vocab import build_vocab_from_iterator
5
+ from torchtext.datasets import Multi30k
6
+ from torch import Tensor
7
+ from typing import Iterable, List
8
+
9
# Define your model, tokenizer, and other necessary components here
# Ensure you have imported all necessary libraries

# Load your transformer model
# NOTE(review): Seq2SeqTransformer and the hyperparameter constants
# (NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, EMB_SIZE, NHEAD, SRC_VOCAB_SIZE,
# TGT_VOCAB_SIZE, FFN_HID_DIM) are not defined anywhere in this file — they
# must be defined or imported before this line runs, otherwise it raises
# NameError at import time. Confirm where they are meant to come from.
model = Seq2SeqTransformer(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, EMB_SIZE,
NHEAD, SRC_VOCAB_SIZE, TGT_VOCAB_SIZE, FFN_HID_DIM)
# Prefer GPU when available; map_location lets the checkpoint load on CPU-only
# machines even if it was saved from a CUDA run.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.load_state_dict(torch.load('./transformer_model.pth', map_location=device))
# Switch to inference mode (disables dropout / uses running batch-norm stats).
model.eval()
18
+
19
def translate(model: torch.nn.Module, src_sentence: str):
    """Greedy-decode a translation of *src_sentence* with *model*.

    Relies on module-level helpers defined elsewhere in this file/project:
    text_transform, vocab_transform, greedy_decode, SRC_LANGUAGE,
    TGT_LANGUAGE and BOS_IDX. Returns the decoded sentence as a string
    with the <bos>/<eos> marker tokens stripped out.
    """
    model.eval()
    # Numericalise the source sentence into a (seq_len, 1) column tensor.
    src_tensor = text_transform[SRC_LANGUAGE](src_sentence).view(-1, 1)
    seq_len = src_tensor.shape[0]
    # All-False boolean mask: the encoder attends to every source position.
    encoder_mask = (torch.zeros(seq_len, seq_len)).type(torch.bool)
    decoded_ids = greedy_decode(
        model, src_tensor, encoder_mask,
        max_len=seq_len + 5, start_symbol=BOS_IDX,
    ).flatten()
    words = vocab_transform[TGT_LANGUAGE].lookup_tokens(list(decoded_ids.cpu().numpy()))
    return " ".join(words).replace("<bos>", "").replace("<eos>", "")
27
+
28
if __name__ == "__main__":
    # Create the Gradio interface.
    iface = gr.Interface(
        # BUGFIX: translate() takes (model, src_sentence) but Gradio passes
        # only the textbox value; the original fn=translate would raise a
        # TypeError on every request. Bind the module-level model here.
        fn=lambda text: translate(model, text),
        inputs=[
            # BUGFIX: gr.inputs.Textbox is the deprecated pre-3.x API and was
            # removed in modern Gradio; components live at the top level now.
            gr.Textbox(label="Text"),
        ],
        outputs=["text"],        # Define the output type as text
        cache_examples=False,    # Disable caching of examples
        title="germanToenglish", # Set the title of the interface
    )

    # Launch the interface with a public share link.
    iface.launch(share=True)