Upload tabs/comparison.py with huggingface_hub
tabs/comparison.py +76 -0
tabs/comparison.py
ADDED
@@ -0,0 +1,76 @@
+import streamlit as st
+from utils import get_tokenizer, get_tokenization, get_vocab_size
+import logging
+import pandas as pd
+
+# Configure logging
+logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
+
+dataframe_path = "darija_tokenizers_leaderboard.jsonl"
+
+
+def calculate_metrics(tokenizer_id, text):
+    logging.debug(f"Calculating metrics for tokenizer: {tokenizer_id}")
+    try:
+        tokenizer = get_tokenizer(tokenizer_id)
+        tokens = get_tokenization(tokenizer, text)
+        vocab_size = get_vocab_size(tokenizer)
+        tokens_count = len(tokens)
+        tokens_ratio = tokens_count / len(text) if len(text) > 0 else 0
+        logging.debug(f"Metrics calculated: vocab_size={vocab_size}, tokens_count={tokens_count}, tokens_ratio={tokens_ratio}")
+        return {
+            "Tokenizer": tokenizer_id,
+            "Vocabulary Size": vocab_size,
+            "Token Count": tokens_count,
+            "Tokens/Character Ratio": tokens_ratio
+        }
+    except Exception as e:
+        logging.error(f"Error processing {tokenizer_id}: {e}")
+        st.error(f"Error processing {tokenizer_id}: {e}")
+        return None
+
+def comparison_tab(df):
+    st.header("Tokenizer Comparison")
+    st.markdown("Compare two tokenizers side by side.")
+
+    input_text = st.text_area("Enter text to compare:", "هذا مثال لنص بالدارجة المغربية")
+
+    col1, col2 = st.columns(2)
+
+    with col1:
+        st.subheader("Tokenizer 1")
+        tokenizer_1_choice = st.radio("Select Tokenizer 1 Source", ["From Leaderboard", "Enter New Model"], key="tokenizer_1_source")
+        if tokenizer_1_choice == "From Leaderboard":
+            model_1 = st.selectbox("Select Tokenizer 1", df["Tokenizer"].tolist(), key="model_1")
+        else:
+            model_1 = st.text_input("Enter Tokenizer 1 Name", key="model_1_input")
+        if input_text and model_1:
+            with st.spinner(f"Tokenizing with {model_1}..."):
+                metrics = calculate_metrics(model_1, input_text)
+                if metrics:
+                    st.write(f"**Vocabulary Size:** {metrics['Vocabulary Size']}")
+                    st.write(f"**Token Count:** {metrics['Token Count']}")
+                    st.write(f"**Tokens/Character Ratio:** {metrics['Tokens/Character Ratio']:.4f}")
+                    tokenizer = get_tokenizer(model_1)
+                    tokens = tokenizer.tokenize(input_text)
+                    tokens_html = ' '.join([f'<span style="background-color: #e6f3ff; padding: 2px 5px; margin-right: 5px; border-radius: 3px;">{token}</span>' for token in tokens])
+                    st.markdown(f'<div style="line-height: 2.5;">{tokens_html}</div>', unsafe_allow_html=True)
+
+    with col2:
+        st.subheader("Tokenizer 2")
+        tokenizer_2_choice = st.radio("Select Tokenizer 2 Source", ["From Leaderboard", "Enter New Model"], key="tokenizer_2_source")
+        if tokenizer_2_choice == "From Leaderboard":
+            model_2 = st.selectbox("Select Tokenizer 2", df["Tokenizer"].tolist(), key="model_2")
+        else:
+            model_2 = st.text_input("Enter Tokenizer 2 Name", key="model_2_input")
+        if input_text and model_2:
+            with st.spinner(f"Tokenizing with {model_2}..."):
+                metrics = calculate_metrics(model_2, input_text)
+                if metrics:
+                    st.write(f"**Vocabulary Size:** {metrics['Vocabulary Size']}")
+                    st.write(f"**Token Count:** {metrics['Token Count']}")
+                    st.write(f"**Tokens/Character Ratio:** {metrics['Tokens/Character Ratio']:.4f}")
+                    tokenizer = get_tokenizer(model_2)
+                    tokens = tokenizer.tokenize(input_text)
+                    tokens_html = ' '.join([f'<span style="background-color: #fa87b5; padding: 2px 5px; margin-right: 5px; border-radius: 3px;">{token}</span>' for token in tokens])
+                    st.markdown(f'<div style="line-height: 2.5;">{tokens_html}</div>', unsafe_allow_html=True)
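For context, the `get_tokenizer`, `get_tokenization`, and `get_vocab_size` helpers imported from `utils` are not part of this commit. A minimal sketch of what they might look like, assuming they wrap `transformers.AutoTokenizer` (only the function names come from the import; the bodies below are assumptions):

# utils.py -- hypothetical sketch; this module is not included in this commit.
from transformers import AutoTokenizer

def get_tokenizer(tokenizer_id):
    # Assumed: load a pretrained tokenizer by repo id from the Hugging Face Hub.
    return AutoTokenizer.from_pretrained(tokenizer_id)

def get_tokenization(tokenizer, text):
    # Assumed: return the list of token strings for the input text,
    # matching the tokenizer.tokenize() call used later in comparison_tab.
    return tokenizer.tokenize(text)

def get_vocab_size(tokenizer):
    # Assumed: len() of a transformers tokenizer gives the full vocabulary
    # size, including any added special tokens.
    return len(tokenizer)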
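Usage note: `comparison_tab(df)` expects a dataframe with a `Tokenizer` column, and the module defines `dataframe_path` pointing at a JSONL leaderboard file. One way the tab might be wired into a main app (a sketch; `app.py` and the tab layout are assumptions, not part of this commit):

# app.py -- hypothetical wiring; only comparison_tab and dataframe_path
# come from this commit.
import pandas as pd
import streamlit as st

from tabs.comparison import comparison_tab, dataframe_path

# Assumed: the leaderboard JSONL has one record per line with a "Tokenizer" field.
df = pd.read_json(dataframe_path, lines=True)

(tab,) = st.tabs(["Comparison"])
with tab:
    comparison_tab(df)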