HeshamHaroon commited on
Commit
58c0de5
·
verified ·
1 Parent(s): 890f5a9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -11
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
3
  import aranizer
4
  from aranizer import aranizer_bpe50k, aranizer_bpe64k, aranizer_bpe86k, aranizer_sp32k, aranizer_sp50k, aranizer_sp64k, aranizer_sp86k
5
 
6
- # List of available tokenizers for the Radio buttons
7
  tokenizer_options = {
8
  "aranizer_bpe50k": "BPE 50k",
9
  "aranizer_bpe64k": "BPE 64k",
@@ -14,7 +14,7 @@ tokenizer_options = {
14
  "aranizer_sp86k": "SP 86k",
15
  }
16
 
17
- # Mapping from names to tokenizer getters
18
  tokenizers = {
19
  "aranizer_bpe50k": aranizer_bpe50k.get_tokenizer,
20
  "aranizer_bpe64k": aranizer_bpe64k.get_tokenizer,
@@ -25,30 +25,29 @@ tokenizers = {
25
  "aranizer_sp86k": aranizer_sp86k.get_tokenizer,
26
  }
27
 
28
- def compare_tokenizers(tokenizer_name, text):
29
- tokenizer = tokenizers[tokenizer_name]() # Loading the selected tokenizer
 
30
  tokens = tokenizer.tokenize(text)
31
  encoded_output = tokenizer.encode(text, add_special_tokens=True)
32
 
33
- # Prepare the results to be displayed without Decoded Text
34
- results = [(tokenizer_name, tokens, encoded_output)]
35
  return results
36
 
37
- # Define Gradio interface components
38
  inputs_component = [
39
  gr.Radio(choices=list(tokenizer_options.values()), label="Select Tokenizer"),
40
  gr.Textbox(lines=2, placeholder="Enter Arabic text here...", label="Input Text")
41
  ]
42
- # Adjusted outputs to exclude the Decoded Text
43
  outputs_component = gr.Dataframe(headers=["Tokenizer", "Tokens", "Encoded Output"], label="Results")
44
 
45
- # Setting up the interface
 
46
  iface = Interface(
47
  fn=compare_tokenizers,
48
  inputs=inputs_component,
49
  outputs=outputs_component,
50
- title="AraNizer Tokenizer Comparison"
 
51
  )
52
 
53
- # Launching the Gradio app
54
  iface.launch()
 
3
  import aranizer
4
  from aranizer import aranizer_bpe50k, aranizer_bpe64k, aranizer_bpe86k, aranizer_sp32k, aranizer_sp50k, aranizer_sp64k, aranizer_sp86k
5
 
6
+
7
  tokenizer_options = {
8
  "aranizer_bpe50k": "BPE 50k",
9
  "aranizer_bpe64k": "BPE 64k",
 
14
  "aranizer_sp86k": "SP 86k",
15
  }
16
 
17
+
18
  tokenizers = {
19
  "aranizer_bpe50k": aranizer_bpe50k.get_tokenizer,
20
  "aranizer_bpe64k": aranizer_bpe64k.get_tokenizer,
 
25
  "aranizer_sp86k": aranizer_sp86k.get_tokenizer,
26
  }
27
 
28
def compare_tokenizers(tokenizer_label, text):
    """Tokenize *text* with the tokenizer chosen by its display label.

    Args:
        tokenizer_label: Display name selected in the Radio component
            (a value of ``tokenizer_options``, e.g. ``"BPE 50k"``).
        text: Arabic input text to tokenize.

    Returns:
        A single-row list ``[(tokenizer_label, tokens, encoded_output)]``
        shaped for the results Dataframe.

    Raises:
        ValueError: If *tokenizer_label* matches no known tokenizer.
    """
    # Map the human-readable label back to the internal tokenizer key.
    # Supplying a default avoids an opaque StopIteration when the label
    # is unknown; we raise a clear ValueError instead.
    tokenizer_key = next(
        (key for key, label in tokenizer_options.items() if label == tokenizer_label),
        None,
    )
    if tokenizer_key is None:
        raise ValueError(f"Unknown tokenizer selection: {tokenizer_label!r}")

    tokenizer = tokenizers[tokenizer_key]()  # getters construct the tokenizer lazily
    tokens = tokenizer.tokenize(text)
    encoded_output = tokenizer.encode(text, add_special_tokens=True)

    results = [(tokenizer_label, tokens, encoded_output)]
    return results
36
 
 
37
# Input widgets: tokenizer chooser (by display label) and Arabic text entry.
tokenizer_choice = gr.Radio(
    choices=list(tokenizer_options.values()),
    label="Select Tokenizer",
)
arabic_text = gr.Textbox(
    lines=2,
    placeholder="Enter Arabic text here...",
    label="Input Text",
)
inputs_component = [tokenizer_choice, arabic_text]

# Output table: one row per tokenization run.
outputs_component = gr.Dataframe(
    headers=["Tokenizer", "Tokens", "Encoded Output"],
    label="Results",
)
42
 
43
# Help text shown under the app title.
description = "Select a tokenizer and input the Arabic text to see the tokenization results. For a better view of the results table, please maximize your browser window."

# Use the gr.* namespace for consistency with gr.Radio/gr.Textbox/gr.Dataframe;
# the visible imports only provide `import gradio as gr`, so a bare `Interface`
# would be a NameError (assumes no `from gradio import Interface` at the file
# top — TODO confirm against the full file).
iface = gr.Interface(
    fn=compare_tokenizers,
    inputs=inputs_component,
    outputs=outputs_component,
    title="AraNizer Tokenizer Comparison",
    description=description,
)

# Launching the Gradio app (blocks until the server is stopped).
iface.launch()