# Gradio Space: Hindi text tokenizer demo (minbpe RegexTokenizer).
import gradio as gr
from minbpe.regex import RegexTokenizer
from typing import List, Dict
# Initialize the tokenizer once at module load so the Gradio callback can
# reuse it across requests.
tokenizer = RegexTokenizer()
# Load a pre-trained BPE model (trained on New Testament text per the path
# name — presumably Hindi, matching the UI below; confirm against training run).
tokenizer.load("modelsV3-regex-new-testament-only/regex.model")
def process_text(text: str, tok=None) -> str:
    """Tokenize *text* with BPE and return a human-readable report.

    For each token the report shows its integer ID, its raw byte sequence,
    and its UTF-8 text (invalid sequences rendered with U+FFFD), followed
    by the fully decoded round-trip text.

    Args:
        text: Input string to tokenize.
        tok: Optional tokenizer override (must expose ``encode``, ``decode``
            and a ``vocab`` mapping of id -> bytes). Defaults to the
            module-level ``tokenizer`` loaded at startup.

    Returns:
        A newline-joined, multi-line string report.
    """
    if tok is None:
        tok = tokenizer  # fall back to the module-level model

    # Get token IDs
    token_ids = tok.encode(text)

    # Build formatted output
    output = []
    output.append("Token Details:")
    output.append("-" * 50)

    for token_id in token_ids:
        # Get the bytes for this token
        token_bytes = tok.vocab[token_id]
        # Convert bytes to string representation, replacing invalid chars.
        # A single token may hold a partial UTF-8 sequence, so errors='replace'
        # is required to avoid UnicodeDecodeError on multi-byte scripts.
        token_text = token_bytes.decode('utf-8', errors='replace')

        output.append(f"Token ID: {token_id}")
        output.append(f"Token Bytes: {str(token_bytes)}")
        output.append(f"Token Text: {token_text}")
        output.append("-" * 50)

    # Add decoded text at the end as a round-trip sanity check
    decoded_text = tok.decode(token_ids)
    output.append(f"\nFull decoded text: {decoded_text}")

    return "\n".join(output)
# Create Gradio interface: single textbox in, single textbox out, with two
# pre-filled Hindi example inputs.
demo = gr.Interface(
fn=process_text,
inputs=gr.Textbox(label="Enter text to tokenize", lines=3),
outputs=gr.Textbox(label="Token Information", lines=10),
title="Hindi Text Tokenizer",
description="Enter Hindi text to see its token representation using Byte Pair Encoding",
# Example inputs shown as clickable rows below the interface.
examples=[
["उज्जियाह से योताम उत्पन्न"],
["नमस्ते दुनिया"]
]
)
# Launch the web server only when run as a script (not when imported).
# Fix: removed stray trailing "|" scrape artifact and restored indentation,
# both of which made this guard a syntax error.
if __name__ == "__main__":
    demo.launch()