File size: 954 Bytes
73feb6e
 
 
fddf3ff
 
 
73feb6e
fddf3ff
73feb6e
fddf3ff
2e6b9d1
fddf3ff
 
 
0105d3b
eae7c24
 
 
fddf3ff
 
eae7c24
 
fddf3ff
 
0bd968a
0105d3b
fddf3ff
 
 
 
 
 
cac1b0e
fddf3ff
 
0105d3b
 
9b041cc
fddf3ff
0105d3b
 
c7add9e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
#import asyncio
#import gc
#import logging
import os

import pandas as pd
#import psutil
import streamlit as st
#from PIL import Image
from streamlit import components
#from streamlit.caching import clear_cache
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers_interpret import SequenceClassificationExplainer

#os.environ["TOKENIZERS_PARALLELISM"] = "false"
#logging.basicConfig(
#    format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO
#)


#def print_memory_usage():
#    logging.info(f"RAM memory % used: {psutil.virtual_memory()[2]}")


@st.cache(allow_output_mutation=True, suppress_st_warning=True, max_entries=1)
def load_model(model_name):
    """Load and cache the pretrained model/tokenizer pair for *model_name*.

    Cached via ``st.cache`` (``max_entries=1``) so only one model is kept
    resident at a time; ``allow_output_mutation=True`` because HF model
    objects are mutated in place by downstream code.
    NOTE(review): ``st.cache`` is deprecated in modern Streamlit — prefer
    ``st.cache_resource`` when the app's Streamlit version is upgraded.

    Args:
        model_name: Hugging Face model identifier or local path.

    Returns:
        tuple: (AutoModelForSequenceClassification, AutoTokenizer) loaded
        from ``model_name``.
    """
    # The stray blank line between the decorator and the def was removed:
    # although CPython tolerates it, it violates PEP 8 (E304) and is fragile.
    return (
        AutoModelForSequenceClassification.from_pretrained(model_name),
        AutoTokenizer.from_pretrained(model_name),
    )

# Debug trace: marks that module top-level execution has begun.
print("before main")


# Page title. Fix: "Interpet" -> "Interpret" (user-visible typo).
st.title("Transformers Interpret Demo App")


# Debug trace: marks that the whole module has finished importing/executing.
print("end of total file")