import gc
import psutil
import torch
import shutil
from transformers.utils.hub import TRANSFORMERS_CACHE
import streamlit as st
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), )))


def free_memory():
    #  """Free up CPU & GPU memory before loading a new model."""
    # global current_model, current_tokenizer

    # if current_model is not None:
    #     del current_model  # Delete the existing model
    #     current_model = None  # Reset reference

    # if current_tokenizer is not None:
    #     del current_tokenizer  # Delete the tokenizer
    #     current_tokenizer = None

    gc.collect()  # Force garbage collection for CPU memory

    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # Free GPU memory
        torch.cuda.ipc_collect()  # Clean up PyTorch GPU cache

    # On CPU-only runs there is no CUDA cache to clear; just refresh process
    # memory statistics (gc.collect() above already released unused objects)
    try:
        if not torch.cuda.is_available():
            psutil.virtual_memory()  # Refresh memory stats
    except Exception as e:
        print(f"Memory cleanup error: {e}")

    # Delete cached Hugging Face models
    try:
        cache_dir = TRANSFORMERS_CACHE
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir)
            print("Cache cleared!")
    except Exception as e:
        print(f"❌ Cache cleanup error: {e}")

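
# Illustrative usage sketch (not part of the original dashboard): free_memory() is
# intended to run right before a fresh checkpoint is loaded, so previously loaded
# weights, CUDA caches, and the Hugging Face cache directory are released first.
# The checkpoint name below is a generic placeholder, not one of the project's
# published models.
def _example_reload_model(checkpoint: str = "distilbert-base-uncased"):
    from transformers import AutoModel, AutoTokenizer

    free_memory()  # drop cached weights, clear CUDA caches, wipe the HF cache dir
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModel.from_pretrained(checkpoint)
    return tokenizer, model
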

def create_sample_example1():
    st.write("""

        #### Sample Example 1

        """)
    graph = """

    digraph {

        // Global graph settings with explicit DPI

        graph [bgcolor="white", rankdir=TB, splines=true, nodesep=0.8, ranksep=0.8];

        node [shape=box, style="rounded,filled", fontname="Helvetica", fontsize=9, margin="0.15,0.1"];



        // Define nodes with custom colors

        Input [label="Input:\nbruh, floods in Kerala, rescue ops non-stop 🚁", fillcolor="#ffe6de", fontcolor="#000000"];

        Output [label="Output:\nBrother, the floods in Kerala are severe,\nand rescue operations are ongoing continuously.", fillcolor="#ffe6de", fontcolor="#000000"];

        Sentiment [label="Sentiment:\nNEUTRAL", fillcolor="#ecdeff", fontcolor="black"];



        // Emotion nodes with a uniform style

        Anger [label="Anger: 0.080178231", fillcolor="#deffe1", fontcolor="black"];

        Disgust [label="Disgust: 0.015257259", fillcolor="#deffe1", fontcolor="black"];

        Fear [label="Fear: 0.601871967", fillcolor="#deffe1", fontcolor="black"];

        Joy [label="Joy: 0.00410547", fillcolor="#deffe1", fontcolor="black"];

        Neutral [label="Neutral: 0.0341026", fillcolor="#deffe1", fontcolor="black"];

        Sadness [label="Sadness: 0.245294735", fillcolor="#deffe1", fontcolor="black"];

        Surprise [label="Surprise: 0.019189769", fillcolor="#deffe1", fontcolor="black"];



        // Define edges with a consistent style

        edge [color="#7a7a7a", penwidth=3];



        // Establish the tree structure

        Input -> Output;

        Input -> Sentiment;

        Sentiment -> Emotion;

        Emotion -> Anger;

        Emotion -> Disgust;

        Emotion -> Fear;

        Emotion -> Joy;

        Emotion -> Neutral;

        Emotion -> Sadness;

        Emotion -> Surprise;

    }

    """
    st.graphviz_chart(graph)


def create_sample_example2():
    st.write("""

        #### Sample Example 2

        """)
    graph = """

    digraph {

        // Global graph settings

        graph [bgcolor="white", rankdir=TB, splines=true, nodesep=0.8, ranksep=0.8];

        node [shape=box, style="rounded,filled", fontname="Helvetica", fontsize=9, margin="0.15,0.1"];



        // Define nodes with custom colors

        Input [label="Input:\nu rlly think all that talk means u tough? lol, when I step up, u ain't gon say sh*t", fillcolor="#ffe6de", fontcolor="black"];

        Output [label="Output:\nyou really think all that talk makes you tough [lol](laughed out loud) when i step up you are not going to say anything", fillcolor="#ffe6de", fontcolor="black"];

        Sentiment [label="Sentiment:\nNEGATIVE", fillcolor="#ecdeff", fontcolor="black"];



        // Emotion nodes with a uniform style

        Anger [label="Anger: 0.14403291", fillcolor="#deffe1", fontcolor="black"];

        Disgust [label="Disgust: 0.039282672", fillcolor="#deffe1", fontcolor="black"];

        Fear [label="Fear: 0.014349542", fillcolor="#deffe1", fontcolor="black"];

        Joy [label="Joy: 0.048965044", fillcolor="#deffe1", fontcolor="black"];

        Neutral [label="Neutral: 0.494852662", fillcolor="#deffe1", fontcolor="black"];

        Sadness [label="Sadness: 0.021111647", fillcolor="#deffe1", fontcolor="black"];

        Surprise [label="Surprise: 0.237405464", fillcolor="#deffe1", fontcolor="black"];



        // Define edges with a consistent style

        edge [color="#7a7a7a", penwidth=3];



        // Establish the tree structure

        Input -> Output;

        Input -> Sentiment;

        Sentiment -> Emotion;

        Emotion -> Anger;

        Emotion -> Disgust;

        Emotion -> Fear;

        Emotion -> Joy;

        Emotion -> Neutral;

        Emotion -> Sadness;

        Emotion -> Surprise;

    }

    """
    st.graphviz_chart(graph)


def create_project_overview():
    # st.divider()
    st.markdown("## Project Overview")
    st.write(f"""

        Tachygraphyβ€”originally developed to expedite writingβ€”has evolved over centuries. In the 1990s, it reappeared as micro-text, driving faster communication on social media with characteristics like 'Anytime, Anyplace, Anybody, and Anything (4A)'. This project focuses on the analysis and normalization of micro-text, which is a prevalent form of informal communication today. It aims to enhance Natural Language Processing (NLP) tasks by standardizing micro-text for better sentiment analysis, emotion analysis, data extraction and normalization to understandable form aka. 4A message decoding as primary objective.

        """
             )


def create_footer():
    # st.divider()
    st.markdown("## About Us")

    # 🛠️ Layout using Streamlit columns
    col1, col2, col3 = st.columns([1, 1, 1])

    # 🚀 Contributors Section
    with col1:
        st.markdown("### πŸš€ Contributors")
        st.write("##### **Archisman Karmakar**")
        st.write("[πŸ”— LinkedIn](https://www.linkedin.com/in/archismankarmakar/) | [πŸ™ GitHub](https://www.github.com/ArchismanKarmakar) | [πŸ“Š Kaggle](https://www.kaggle.com/archismancoder)")

        st.write("##### **Sumon Chatterjee**")
        st.write("[πŸ”— LinkedIn](https://www.linkedin.com/in/sumon-chatterjee-3b3b43227) | [πŸ™ GitHub](https://github.com/Sumon670) | [πŸ“Š Kaggle](https://www.kaggle.com/sumonchatterjee)")

    # 🎓 Mentors Section
    with col2:
        st.markdown("### πŸŽ“ Mentors")
        st.write("##### **Prof. Anupam Mondal**")
        st.write("[πŸ”— LinkedIn](https://www.linkedin.com/in/anupam-mondal-ph-d-8a7a1a39/) | [πŸ“š Google Scholar](https://scholar.google.com/citations?user=ESRR9o4AAAAJ&hl=en) | [🌐 Website](https://sites.google.com/view/anupammondal/home)")

        st.write("##### **Prof. Sainik Kumar Mahata**")
        st.write("[πŸ”— LinkedIn](https://www.linkedin.com/in/mahatasainikk) | [πŸ“š Google Scholar](https://scholar.google.co.in/citations?user=OcJDM50AAAAJ&hl=en) | [🌐 Website](https://sites.google.com/view/sainik-kumar-mahata/home)")

    # 📌 Research Project Info Section
    with col3:
        st.markdown("### πŸ“ About the Project")
        st.write("This is our research project for our **B.Tech final year** and a **journal** which is yet to be published.")
        st.write("Built with πŸ’™ using **Streamlit**.")

# 🚀 Display Footer


def show_dashboard():
    # free_memory()
    st.title("Tachygraphy Micro-text Analysis & Normalization")
    st.write(f"""Welcome to the Tachygraphy Micro-text Analysis & Normalization Project. This application is designed to analyze text data through three stages:""")
    coltl1, coltl2 = st.columns(2)
    with coltl1:
        st.write("""

            1. Sentiment Polarity Analysis

            2. Emotion Mood-tag Analysis

            3. Text Transformation & Normalization

            4. All three stages stacked together with their best models

            5. Data Correction & Collection

        """)
    with coltl2:
        st.write("""

                - Training Source: [GitHub @ Tachygraphy Micro-text Analysis & Normalization](https://github.com/ArchismanKarmakar/Tachygraphy-Microtext-Analysis-And-Normalization)

                - Kaggle Collections: [Kaggle @ Tachygraphy Micro-text Analysis & Normalization](https://www.kaggle.com/datasets/archismancoder/dataset-tachygraphy/data?select=Tachygraphy_MicroText-AIO-V3.xlsx)

                - Hugging Face Org: [Hugging Face @ Tachygraphy Micro-text Analysis & Normalization](https://huggingface.co/Tachygraphy-Microtext-Normalization-IEMK25)

                - Deployment Source: [GitHub](https://github.com/ArchismanKarmakar/Tachygraphy-Microtext-Analysis-And-Normalization-Deployment-Source-HuggingFace_Streamlit_JPX14032025)

                - Streamlit Deployment: [Streamlit](https://tachygraphy-microtext.streamlit.app/)

                - Hugging Face Space Deployment: [Hugging Face Space](https://huggingface.co/spaces/Tachygraphy-Microtext-Normalization-IEMK25/Tachygraphy-Microtext-Analysis-and-Normalization-ArchismanCoder)

                """)

    create_footer()

    create_project_overview()

    create_sample_example1()

    create_sample_example2()


def __main__():
    show_dashboard()
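

# Optional entry point (illustrative sketch): render the dashboard when this module
# is executed directly, e.g. via `streamlit run`; the guard has no effect when the
# module is only imported by the main app.
if __name__ == "__main__":
    __main__()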