thak123 committed
Commit de2bdfb · 1 Parent(s): 3cc0045

Update app.py

Files changed (1): app.py (+39, -4)
app.py CHANGED
@@ -4,21 +4,56 @@ import sys
 import dataset
 import engine
 from model import BERTBaseUncased
-from tokenizer import tokenizer
+
 import config
 from transformers import pipeline, AutoTokenizer, AutoModel
 import gradio as gr
 
+from ekphrasis.classes.preprocessor import TextPreProcessor
+from ekphrasis.classes.tokenizer import SocialTokenizer
+from ekphrasis.dicts.emoticons import emoticons
+
 device = config.device
 model = BERTBaseUncased()
 model.load_state_dict(torch.load(config.MODEL_PATH, map_location=torch.device(device)), strict=False)
 model.to(device)
 
-T = tokenizer.TweetTokenizer(
-    preserve_handles=True, preserve_hashes=True, preserve_case=False, preserve_url=False)
+
+
+text_processor = TextPreProcessor(
+    # terms that will be normalized
+    normalize=['url', 'email', 'percent', 'money', 'phone', 'user'],
+    # terms that will be annotated
+    annotate={},
+    fix_html=True,  # fix HTML tokens
+
+    # corpus from which the word statistics are going to be used
+    # for word segmentation
+    segmenter="twitter",
+
+    # corpus from which the word statistics are going to be used
+    # for spell correction
+    corrector="twitter",
+
+    unpack_hashtags=False,  # perform word segmentation on hashtags
+    unpack_contractions=False,  # unpack contractions (can't -> can not)
+    spell_correct_elong=False,  # spell correction for elongated words
+
+    # select a tokenizer: you can use SocialTokenizer, or pass your own;
+    # it should take a string as input and return a list of tokens
+    tokenizer=SocialTokenizer(lowercase=True).tokenize,
+
+    # list of dictionaries for replacing tokens extracted from the text
+    # with other expressions; you can pass more than one dictionary
+    dicts=[]
+)
+
+# T = tokenizer.TweetTokenizer(
+#     preserve_handles=True, preserve_hashes=True, preserve_case=False, preserve_url=False)
 
 def preprocess(text):
-    tokens = T.tokenize(text)
+    # tokens = T.tokenize(text)
+    tokens = text_processor.pre_process_doc(text)
     print(tokens, file=sys.stderr)
     ptokens = []
     for index, token in enumerate(tokens):
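
For context, a minimal stand-alone sketch of what the new ekphrasis pipeline does to a tweet. The sample input and the expected tokens below are illustrative assumptions, not part of the commit; pre_process_doc is ekphrasis's single-document entry point (pre_process_docs expects an iterable of documents, which is why the diff above uses the singular form).

# Minimal sketch (not part of the commit): running the new ekphrasis
# pipeline on its own. The sample tweet below is invented for illustration.
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer

text_processor = TextPreProcessor(
    normalize=['url', 'email', 'percent', 'money', 'phone', 'user'],
    annotate={},
    fix_html=True,
    segmenter="twitter",   # ekphrasis downloads twitter word statistics on first run
    corrector="twitter",
    unpack_hashtags=False,
    unpack_contractions=False,
    spell_correct_elong=False,
    tokenizer=SocialTokenizer(lowercase=True).tokenize,
    dicts=[],
)

# pre_process_doc takes a single string and, with a tokenizer configured,
# returns a list of tokens; handles and URLs become <user>/<url> placeholders.
tokens = text_processor.pre_process_doc("@someuser loved it, see https://example.com")
print(tokens)
# roughly: ['<user>', 'loved', 'it', ',', 'see', '<url>'] (exact splits may vary)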