import streamlit as st
from streamlit_option_menu import option_menu
from word2vec import *
import pandas as pd
from autocomplete import *
from vector_graph import *
from plots import *
from lsj_dict import *
import json
from streamlit_tags import st_tags, st_tags_sidebar
st.set_page_config(page_title="ἄγαλμα | AGALMA", layout="centered", page_icon="images/AGALMA_logo.png")
# Cache data
@st.cache_data
def load_lsj_dict():
    with open('lsj_dict.json', 'r', encoding='utf-8') as f:
        return json.load(f)
@st.cache_data
def load_all_models_words():
return sorted(load_compressed_word_list('corpora/compass_filtered.pkl.gz'), key=custom_sort)
@st.cache_data
def load_models_for_word_dict():
return word_in_models_dict('corpora/compass_filtered.pkl.gz')
@st.cache_data
def load_all_lemmas():
return load_compressed_word_list('all_lemmas.pkl.gz')
@st.cache_data
def load_lemma_count_dict():
return count_lemmas('lemma_list_raw')
# Load compressed word list
all_models_words = load_all_models_words()
# Prepare lsj dictionary
lemma_dict = load_lsj_dict()
# Load dictionary with words as keys and eligible models as values
models_for_word_dict = load_models_for_word_dict()
lemma_counts = load_lemma_count_dict()
# Set styles for menu
styles_horizontal = {
"container": {"display": "flex", "justify-content": "center"},
"nav": {"display": "flex", "gap": "2px", "margin": "5px"},
"nav-item": {"flex": "1", "font-family": "Helvetica"},
"nav-link": {
"background-color": "#f0f0f0",
"border": "1px solid #ccc",
"border-radius": "5px",
"padding": "10px",
"width": "150px",
"height": "60px",
"display": "flex",
"align-items": "center",
"justify-content": "center",
"transition": "background-color 0.3s, color 0.3s",
"color": "black",
"text-decoration": "none"
},
"nav-link:hover": {
"background-color": "rgb(238, 238, 238)",
"color": "#000"
},
"nav-link-selected": {
"background-color": "#B8E52B",
"color": "white",
"font-weight": "bold"
},
"icon": {"display": "None"}
}
styles_vertical = {
"nav-link-selected": {
"background-color": "#B8E52B",
"color": "white",
"font-weight": "bold"
}
}
# Set vertical sidebar width to 350px
st.markdown(
"""
<style>
section[data-testid="stSidebar"] {
width: 350px !important; /* set the sidebar width */
}
</style>
""",
unsafe_allow_html=True,
)
with st.sidebar:
st.image('images/AGALMA_logo_v2.png')
# st.markdown('# ἄγαλμα | AGALMA')
selected = option_menu('ἄγαλμα | AGALMA', ["App", "About", "FAQ", "Subcorpora", "License"],
menu_icon="menu", default_index=0, orientation="vertical", styles=styles_vertical)
if selected == "App":
# Horizontal menu
active_tab = option_menu(None, ["Nearest neighbours", "Cosine similarity", "3D graph", 'Dictionary'],
menu_icon="cast", default_index=0, orientation="horizontal", styles=styles_horizontal)
# Adding CSS style to remove list-style-type
st.markdown("""
<style>
/* Define a class to remove list-style-type */
.no-list-style {
list-style-type: none;
}
</style>
""", unsafe_allow_html=True)
# Nearest neighbours tab
if active_tab == "Nearest neighbours":
# All models in a list
eligible_models = ["Archaic", "Classical", "Hellenistic", "Early Roman", "Late Roman"]
all_models_words = load_all_models_words()
with st.container():
st.markdown("## Nearest Neighbours")
st.markdown('Here you can extract the nearest neighbours to a chosen lemma. Please select one or more time slices and the preferred number of nearest neighbours.')
target_word = st.multiselect("Enter a word", options=all_models_words, max_selections=1)
if len(target_word) > 0:
target_word = target_word[0]
eligible_models = models_for_word_dict[target_word]
models = st.multiselect(
"Select models to search for neighbours",
eligible_models
)
n = st.slider("Number of neighbours", 1, 50, 15)
nearest_neighbours_button = st.button("Find nearest neighbours")
if nearest_neighbours_button:
if not validate_nearest_neighbours(target_word, n, models):
st.error('Please fill in all fields')
else:
# Rewrite models to list of all loaded models
models = load_selected_models(models)
nearest_neighbours = get_nearest_neighbours(target_word, n, models)
all_dfs = []
# Create dataframes
for model in nearest_neighbours.keys():
st.write(f"### {model}")
df = pd.DataFrame(
nearest_neighbours[model],
columns = ['Word', 'Cosine Similarity']
)
# Add word occurrences to dataframe
df['Occurrences'] = df['Word'].apply(lambda x: lemma_counts[model][x])
all_dfs.append((model, df))
st.table(df)
# Store content in a temporary file
tmp_file = store_df_in_temp_file(all_dfs)
# Open the temporary file and read its content
with open(tmp_file, "rb") as file:
file_byte = file.read()
# Create download button
st.download_button(
"Download results",
data=file_byte,
file_name = f'nearest_neighbours_{target_word}.xlsx',
mime='application/octet-stream'
)
# Cosine similarity tab
elif active_tab == "Cosine similarity":
all_models_words = load_all_models_words()
with st.container():
eligible_models_1 = []
eligible_models_2 = []
st.markdown("## Cosine similarity")
st.markdown('Here you can extract the cosine similarity between two lemmas. Please select a time slice for each lemma. You can also calculate the cosine similarity between two vectors of the same lemma in different time slices.')
col1, col2 = st.columns(2)
col3, col4 = st.columns(2)
with col1:
word_1 = st.multiselect("Enter a word", placeholder="πατήρ", max_selections=1, options=all_models_words)
if len(word_1) > 0:
word_1 = word_1[0]
eligible_models_1 = models_for_word_dict[word_1]
with col2:
time_slice_1 = st.selectbox("Time slice word 1", options = eligible_models_1)
with st.container():
with col3:
word_2 = st.multiselect("Enter a word", placeholder="μήτηρ", max_selections=1, options=all_models_words)
if len(word_2) > 0:
word_2 = word_2[0]
eligible_models_2 = models_for_word_dict[word_2]
with col4:
time_slice_2 = st.selectbox("Time slice word 2", eligible_models_2)
# Create button for calculating cosine similarity
cosine_similarity_button = st.button("Calculate cosine similarity")
# If the button is clicked, execute calculation
if cosine_similarity_button:
cosine_similarity_score = get_cosine_similarity(word_1, time_slice_1, word_2, time_slice_2)
st.markdown('''<span style="font-size: 24px"> The Cosine Similarity between %s (%s) and %s (%s) is: **%s**</span>''' % (word_1, time_slice_1, word_2, time_slice_2, cosine_similarity_score), unsafe_allow_html=True)
# 3D graph tab
elif active_tab == "3D graph":
st.markdown("## 3D graph")
st.markdown('''
Here you can generate a 3D representation of the semantic space surrounding a target lemma. Please choose the lemma and the time slice.\
**NB**: the 3D representations are reductions of the multi-dimensional representations created by the models. \
This is necessary for visualization, but some information gets lost when the dimensions are reduced. \
The 3D representations are thus not 100% accurate. For more information, please consult the FAQ.
''')
col1, col2 = st.columns(2)
# Load compressed word list
all_models_words = load_all_models_words()
with st.container():
eligible_models = []
with col1:
word = st.multiselect("Enter a word", all_models_words, max_selections=1)
if len(word) > 0:
word = word[0]
eligible_models = models_for_word_dict[word]
with col2:
time_slice = st.selectbox("Time slice", eligible_models)
n = st.slider("Number of words", 1, 50, 15)
graph_button = st.button("Create 3D graph")
if graph_button:
time_slice_model = convert_time_name_to_model(time_slice)
nearest_neighbours_vectors = get_nearest_neighbours_vectors(word, time_slice_model, n)
fig, df = make_3d_plot_tSNE(nearest_neighbours_vectors, word, time_slice_model)
st.plotly_chart(fig)
# Dictionary tab
elif active_tab == "Dictionary":
with st.container():
st.markdown('## Dictionary')
st.markdown('Search a word in the Liddell-Scott-Jones dictionary (only Greek, no whitespaces).')
all_lemmas = load_all_lemmas()
# query_word = st.multiselect("Search a word in the LSJ dictionary", all_lemmas, max_selections=1)
query_tag = st_tags(label='',
text = '',
value = [],
suggestions = all_lemmas,
maxtags = 1,
key = '1'
)
# If a word has been selected by user
if query_tag:
# Display word information
if query_tag[0] in lemma_dict:
st.write(f"### {query_tag[0]}")
data = lemma_dict[query_tag[0]]
elif query_tag[0].capitalize() in lemma_dict: # Some words are capitalized in the dictionary
st.write(f"### {query_tag[0].capitalize()}")
data = lemma_dict[query_tag[0].capitalize()]
else:
st.error("Word not found in dictionary")
st.stop()
# Put text in readable format
text = format_text(data)
st.markdown(format_text(data), unsafe_allow_html = True)
st.markdown("""
<style>
.tab {
display: inline-block;
margin-left: 4em;
}
.tr {
font-weight: bold;
}
.list-class {
list-style-type: none;
margin-top: 1em;
}
.primary-indicator {
font-weight: bold;
font-size: x-large;
}
.secondary-indicator {
font-weight: bold;
font-size: large;
}
.tertiary-indicator {
font-weight: bold;
font-size: medium;
}
.quaternary-indicator {
font-weight: bold;
font-size: medium;
}
.primary-class {
padding-left: 2em;
}
.secondary-class {
padding-left: 4em;
}
.tertiary-class {
padding-left: 6em;
}
.quaternary-class {
padding-left: 8em;
}
</style>
""", unsafe_allow_html=True)
if selected == "About":
st.markdown("""
## About
Welcome to AGALMA | ἄγαλμα, the Ancient Greek Accessible Language Models for linguistic Analysis!
This interface was developed in the framework of Silvia Stopponi’s PhD project, \
supervised by Saskia Peels-Matthey and Malvina Nissim at the University of Groningen (The Netherlands). \
The aim of this tool is to make language models trained on Ancient Greek available to all interested people, regardless of their coding skills. \
The following people were involved in the creation of this interface:
**Mark den Ouden** developed the interface.
**Silvia Stopponi** trained the models, defined the structure of the interface, and wrote the textual content.
**Saskia Peels-Matthey** supervised the project and revised the structure of the interface and the textual content.
**Malvina Nissim** supervised the project.
**Anchoring Innovation** financially supported the creation of this interface. \
Anchoring Innovation is the Gravitation Grant research agenda of the Dutch National Research School in Classical Studies, OIKOS. \
It is financially supported by the Dutch ministry of Education, Culture and Science (NWO project number 024.003.012).
<div style="text-align: center; font-weight: bold;">How to cite</div>
If you use this interface for your research, please cite it as:
Stopponi, Silvia, Mark den Ouden, Saskia Peels-Matthey & Malvina Nissim. 2024. \
<span style="font-style: italic;">AGALMA: Ancient Greek Accessible Language Models for linguistic Analysis.</span>
""", unsafe_allow_html=True)
if selected == "FAQ":
st.markdown("""
## FAQ
""")
with st.expander(r"$\textsf{\Large What is this interface based on?}$"):
st.write(
"This interface is based on language models. Language models are probability distributions of \
words or word sequences, which store statistical information about word co-occurrences. \
This happens during the training phase, in which models process a corpus of texts in the \
target language(s). Once trained, linguistic information can be extracted from the models, or \
the models can be used to perform specific linguistic tasks. In this interface, we focus on the \
extraction of semantic information. To that end, we created five models, corresponding to five \
time slices. The models on which this interface is based are so-called Word Embedding \
models (the specific architecture is called Word2Vec)."
)
with st.expander(r"$\textsf{\Large What are Word Embeddings?}$"):
st.write(
"Word Embeddings are representations of words obtained via language modelling. More in \
detail, they are strings of numbers (called *vectors*) produced by a language model to \
represent each word in the training corpus in a multi-dimensional space. Words that are more \
similar in meaning will be closer to one another in this vector space (or semantic space) than \
words that are less similar in meaning. The term *word embeddings* is often used as a \
synonym of *predict models*, a type of language model introduced by Mikolov *et al.* (2013) \
with the Word2Vec architecture. This interface is built upon Word2Vec models."
)
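# Illustrative sketch (not executed by the app): with a trained Word2Vec model,
# every word in the training corpus maps to a fixed-length vector. The model path
# and lemma below are hypothetical examples, not the app's actual resources.
def _example_word_vector():
    from gensim.models import Word2Vec

    model = Word2Vec.load("models/classical.model")  # hypothetical path
    vector = model.wv["πατήρ"]  # a 30-dimensional numpy array for this lemma
    return vector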
with st.expander(r"$\textsf{\Large Which corpus was used to train the models?}$"):
st.markdown('''
The five models on which this interface is based were trained on five diachronic slices of the \
Diorisis Ancient Greek Corpus, which is ‘a digital collection of ancient Greek texts (from \
Homer to the early fifth century AD) compiled for linguistic analyses’ (Vatri & McGillivray \
2018: 55). The Diorisis corpus contains a subset of the texts that can be found in the \
Thesaurus Linguae Graecae. More information about the works and authors included in each \
subcorpus can be found [here].'''
)
with st.expander(r"$\textsf{\Large How was the corpus divided into time slices?}$"):
st.write(
"The texts in the corpus were divided according to chronology. We tried to strike a balance \
between respecting the traditional divisions of Ancient Greek literature into periods and \
having slices of a more or less comparable size. The division is the following:\n\n\
Archaic: beginning-500 BCE; Classical: 499-324 BCE; Hellenistic: 323-0 BCE; Early Roman: \
1-250 CE; Late Roman: 251-500 CE."
)
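# Illustrative sketch (not used by the app): the five time slices and their date
# ranges, as listed in the FAQ answer above.
TIME_SLICE_RANGES = {
    "Archaic": "beginning-500 BCE",
    "Classical": "499-324 BCE",
    "Hellenistic": "323-0 BCE",
    "Early Roman": "1-250 CE",
    "Late Roman": "251-500 CE",
}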
with st.expander(r"$\textsf{\Large Which are the theoretical assumptions behind distributional semantic models, such as Word Embeddings?}$"):
st.write(
"Computational semantics is based on the Distributional Hypothesis. According to this \
hypothesis, words used in similar lexical contexts (contexts of words surrounding them) will \
have a similar meaning. This hypothesis was famously summarized by J.R. Firth as ‘you \
shall know a word by the company it keeps’ (1957: xx). Phrased differently, this \
means that two words that occur in similar lexical contexts are probably semantically \
related. The words that occur in the most similar lexical contexts are referred to as \
nearest neighbours. This does not necessarily mean, though, that these words ever \
occur together. A detailed introduction to distributional semantics can be found in the book \
*Distributional Semantics* (Lenci & Sahlgren 2023: 3-25)."
)
with st.expander(r"$\textsf{\Large What are the nearest neighbours?}$"):
st.write(
"Word vectors can be used as coordinates to represent words in a geometric space, called \
*semantic space*. Words with similar vectors, occurring in similar contexts, are closer in the \
space. The nearest neighbours to a word are the closest words to it in the semantic space. \
Words close in the space are not necessarily synonyms, they are rather in a relationship of \
semantic relatedness, i.e. they belong to the same semantic area. An example of neighbours \
in the space could be: *star – moon – sun – cloud – plane – fly – blue*."
)
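# Illustrative sketch (not executed by the app): nearest neighbours are the words
# whose vectors have the highest cosine similarity to the target word's vector.
# The model path and lemma are hypothetical examples.
def _example_nearest_neighbours(n=15):
    from gensim.models import Word2Vec

    model = Word2Vec.load("models/classical.model")  # hypothetical path
    # Returns a list of (word, cosine similarity) pairs, most similar first.
    return model.wv.most_similar("ἀγαθός", topn=n)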
with st.expander(r"$\textsf{\Large Are the nearest neighbours the same as concordances?}$"):
st.write(
"No. The nearest neighbours to a target word do not necessarily occur together with it in the \
same context, but each of them will be found in similar lexical contexts. For example, my \
colleague Pete and I may often go to the same type of conferences and meet the same \
group of people there, but it is quite possible that Pete and I never go to the same \
conference at the same time. Pete and I are similar, but not necessarily spending time \
together. The extraction of the nearest neighbours with word embeddings is thus different \
from finding concordances. The nearest neighbours cannot be extracted manually with close-\
reading methods."
)
with st.expander(r"$\textsf{\Large Which framework and parameters were used to train the models?}$"):
st.write(
"The Word2vec models were trained by using the CADE framework (Bianchi *et al.* 2020), a \
technique which does not require space alignment, i.e. word embeddings trained on different \
corpus slices are directly comparable. CADE was used with the following parameters: \
size=30, siter=5, diter=5, workers=4, sg=0, ns=20. The chosen architecture was the \
Continuous-Bag-of-Words. The context that is taken into account for each word are the 5 \
words before, and the 5 words after the target word."
)
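# Illustrative sketch (not executed by the app) of training with the CADE package,
# using the parameters listed above; the corpus file names are hypothetical and the
# call pattern follows CADE's documented interface.
def _example_train_cade():
    from cade.cade import CADE

    aligner = CADE(size=30, siter=5, diter=5, workers=4, sg=0, ns=20)
    # The compass (all slices concatenated) is trained first; slice models trained
    # afterwards share its context matrix, so their embeddings are directly comparable.
    aligner.train_compass("corpora/compass.txt", overwrite=False)
    aligner.train_slice("corpora/classical.txt", save=True)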
with st.expander(r"$\textsf{\Large What is the cosine similarity value?}$"):
st.write(
"The cosine similarity is a measure of the distance between two words in the semantic space. \
More precisely, the cosine similarity is the cosine of the angle between the two vectors in the \
multi-dimensional space. The value ranges from -1 to 1. The higher the value of the cosine \
similarity (the closer it is to 1), the closer two words are in the semantic space. For example, \
according to our model, the cosine similarity value of πατήρ and μήτηρ in the Classical period \
is 0.93, relatively high, as we might expect for these obviously related words, while the \
cosine similarity value of a random pair like πατήρ and τράπεζα in the same time slice is \
0.12, considerably lower."
)
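# Illustrative sketch (not executed by the app): cosine similarity is the cosine of
# the angle between two vectors, i.e. their dot product divided by the product of
# their norms, and it ranges from -1 to 1.
def _example_cosine_similarity(v1, v2):
    import numpy as np

    v1, v2 = np.asarray(v1, dtype=float), np.asarray(v2, dtype=float)
    return float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))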
with st.expander(r"$\textsf{\Large What are the 3D representations?}$"):
st.write(
"The 3D representation is a way to graphically visualize the semantic space, the method used \
on this website is called t-SNE. Semantic spaces are multi-dimensional, with as many \
dimensions as the digits in the vectors. The embeddings used for this interface only have 30 \
dimensions. A 3D representation reduces the dimensions to 3, to allow for graphic \
representation. Even if 3D representations are effective means of making a semantic space \
visible, **they are not 100% accurate**, since the visualization shows a reduction of the 30 \
dimensions. We thus advise not to base any conclusions on the graphic representation only, \
but to rely on nearest neighbours extraction and on cosine similarity."
)
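# Illustrative sketch (not executed by the app): reducing 30-dimensional word vectors
# to 3 dimensions with t-SNE for plotting. `vectors` is assumed to be an array of
# shape (number of words, 30); the perplexity value is an arbitrary example.
def _example_tsne_reduction(vectors):
    from sklearn.manifold import TSNE

    tsne = TSNE(n_components=3, perplexity=5, random_state=42)
    return tsne.fit_transform(vectors)  # shape: (number of words, 3)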
with st.expander(r"$\textsf{\Large Is the information stored by Word Embeddings reliable?}$"):
st.write(
"The information stored in word embeddings is solely based on the training corpus. This \
means that our models have no additional knowledge of the Ancient Greek language and \
culture. All information extracted from a model thus reflects word co-occurrences, and word \
meaning, in its specific training corpus.\n\n\
Please take into account that the results for words occurring very rarely may be inaccurate. \
Language modelling works on a statistical basis, so a word with only a few occurrences \
may not provide enough evidence to obtain reliable results. But it has been observed that an \
extremely high word frequency can also affect the results. It often happens that the nearest \
neighbours to words occurring very often are other high-frequency words, such as stop \
words (e.g., prepositions, articles, particles). "
)
with st.expander(r"$\textsf{\Large What if I obtain 'strange' results?}$"):
st.write(
"For the abovementioned reasons mentioned, word embeddings are not always reliable \
methods of semantic investigation. Interpretation of the results is always needed to decide \
whether the results at hand are real patterns present in the corpus, and could thus reveal \
interesting phenomena, or just noise present in the data."
)
with st.expander(r"$\textsf{\Large How can word embeddings help us study semantic change?}$"):
st.write(
"Cosine similarity can be computed between vectors of the same word in different time slices. \
The higher the cosine similarity, the more similar the usage of a word is in the two considered \
time slices. If the cosine similarity between a word’s vectors in two consecutive time slices is \
particularly low, there is a chance that semantic change happened at that point in time. The \
analysis of the nearest neighbours to the target word in the two slices can help clarify whether \
change actually happened, and in which direction."
)
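# Illustrative sketch (not executed by the app): because the slice models share one
# space, the same lemma's vectors from two time slices can be compared directly.
# The model paths and lemma are hypothetical examples.
def _example_semantic_change(lemma="λόγος"):
    from gensim.models import Word2Vec
    import numpy as np

    classical = Word2Vec.load("models/classical.model")      # hypothetical path
    hellenistic = Word2Vec.load("models/hellenistic.model")  # hypothetical path
    v1, v2 = classical.wv[lemma], hellenistic.wv[lemma]
    # A low value between consecutive slices may point to semantic change.
    return float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))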
st.markdown("""
## References
Bianchi, F., Di Carlo, V., Nicoli, P., & Palmonari, M. (2020). Compass-aligned distributional
embeddings for studying semantic differences across corpora. *arXiv preprint
arXiv:2004.06519*.

Lenci, A., & Sahlgren, M. (2023). *Distributional semantics*. Cambridge University Press.

Mikolov, T., Chen, K., Corrado, G., & Dean, J. (2013). Efficient estimation of word
representations in vector space. *arXiv preprint arXiv:1301.3781*.

Vatri, A., & McGillivray, B. (2018). The Diorisis ancient Greek corpus: Linguistics and
literature. *Research Data Journal for the Humanities and Social Sciences*, 3(1), 55-65.
""")
if selected == "License":
st.markdown("""
## License
The cosine similarity, nearest neighbours, and 3D representation data are licensed under a CC BY License.
The LSJ dictionary has a CC BY-SA license and comes from the Unicode version of the dictionary produced by \
[Giuseppe G. A. Celano](%s). The original (Betacode) version is provided under a CC BY-SA license by the [Perseus Digital Library](https://www.perseus.tufts.edu/). \
Data available at https://github.com/PerseusDL/lexica/.
""" % 'https://github.com/gcelano/LSJ_GreekUnicode?tab=readme-ov-file')
streamlit_style = """
<style>
html, body {
font-family: 'Helvetica';
}
</style>
"""
st.markdown(streamlit_style, unsafe_allow_html=True)