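# Gradio demo: extractive summarization of legal text.
# A Legal-BERT encoder (nlpaueb/legal-bert-base-uncased) is plugged into
# bert-extractive-summarizer and exposed through a simple text-in/text-out UI.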
import gradio as gr
import nltk
from summarizer import Summarizer
from transformers import AutoConfig, AutoModel, AutoTokenizer

# Sentence tokenizer data used when splitting the input into sentences
nltk.download('punkt')
# model_name = 'distilbert-base-uncased'
model_name = 'nlpaueb/legal-bert-base-uncased'
# model_name = 'laxya007/gpt2_legal'
# model_name = 'facebook/bart-large-cnn'

# Load the Hugging Face config, tokenizer, and encoder.
# output_hidden_states must be enabled so the extractive summarizer can use
# the encoder's sentence embeddings.
custom_config = AutoConfig.from_pretrained(model_name)
custom_config.output_hidden_states = True
custom_tokenizer = AutoTokenizer.from_pretrained(model_name)
custom_model = AutoModel.from_pretrained(model_name, config=custom_config)
bert_legal_model = Summarizer(custom_model=custom_model, custom_tokenizer=custom_tokenizer)
print('Using model {}\n'.format(model_name))



def lincoln(content):
    """Return an extractive summary of the input text."""
    # ratio=0.05 keeps roughly 5% of the sentences; min_length filters out
    # very short sentence candidates.
    summary = bert_legal_model(content, min_length=8, ratio=0.05)
    print("Summary:")
    print(summary)
    return str(summary)
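
# Example for local testing (hypothetical input file, not part of the app):
#   print(lincoln(open("sample_contract.txt", encoding="utf-8").read()))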

iface = gr.Interface(
    fn=lincoln,
    inputs="text",
    outputs="text",
)

if __name__ == "__main__":
    iface.launch(share=False)