File size: 1,408 Bytes
fddf3ff
 
 
 
 
 
 
 
 
 
2e6b9d1
fddf3ff
 
 
0105d3b
eae7c24
 
 
fddf3ff
 
eae7c24
 
fddf3ff
 
0bd968a
0105d3b
fddf3ff
 
 
 
 
 
cac1b0e
fddf3ff
 
0105d3b
 
fddf3ff
9b041cc
 
0105d3b
 
 
 
 
 
 
9b041cc
fddf3ff
0105d3b
 
c7add9e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import asyncio
import gc
import logging
import os

import pandas as pd
import psutil
import streamlit as st
from PIL import Image
from streamlit import components
#from streamlit.caching import clear_cache
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers_interpret import SequenceClassificationExplainer

#os.environ["TOKENIZERS_PARALLELISM"] = "false"
#logging.basicConfig(
#    format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO
#)


#def print_memory_usage():
#    logging.info(f"RAM memory % used: {psutil.virtual_memory()[2]}")


@st.cache(allow_output_mutation=True, suppress_st_warning=True, max_entries=1)
def load_model(model_name):
    """Load and cache a Hugging Face sequence-classification model.

    Cached via st.cache with max_entries=1 so only one model is kept in
    memory at a time (the app advertises low-memory hosting).

    Args:
        model_name: Hugging Face Hub model identifier.

    Returns:
        Tuple of (model, tokenizer) loaded from *model_name*.
    """
    # NOTE(review): st.cache is deprecated in newer Streamlit releases;
    # migrate to st.cache_resource when upgrading Streamlit.
    return (
        AutoModelForSequenceClassification.from_pretrained(model_name),
        AutoTokenizer.from_pretrained(model_name),
    )

# --- Page body (top-level Streamlit script) ---
# Fixed typo in the visible page title ("Interpet" -> "Interpret") and
# removed duplicated leftover debug prints and commented-out dead code.
st.title("Transformers Interpret Demo App")

st.sidebar.markdown(
    "Check out the package on [Github](https://github.com/cdpierse/transformers-interpret)"
)
st.info(
    "Due to limited resources only low memory models are available. Run this [app locally](https://github.com/cdpierse/transformers-interpret-streamlit) to run the full selection of available models. "
)