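# Streamlit front end for Paper2Slides: upload a paper PDF, parse it with grobidmonkey,
# and turn the parsed sections into slide-style summaries with BART models.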
import streamlit as st
import pandas as pd
import numpy as np
import os
import torch
from grobidmonkey import reader

from transformers import pipeline
from transformers import BartTokenizer, BartModel, BartForConditionalGeneration
from transformers import T5Tokenizer, T5ForConditionalGeneration

from document import Document
from BartSE import BARTAutoEncoder


def save_uploaded_file(uploaded_file):
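    """Save the uploaded file under ./uploads and return its path as a string."""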
    file_path = os.path.join("./uploads", uploaded_file.name)
    os.makedirs("./uploads", exist_ok=True)  # Create 'uploads' directory if it doesn't exist
    with open(file_path, "wb") as f:
        f.write(uploaded_file.getbuffer())
    return file_path  # Return the file path as a string

st.title('Paper2Slides')

st.subheader('Upload paper in pdf format')

col1, col2 = st.columns([3, 1])
with col1:
    uploaded_file = st.file_uploader("Choose a file")
with col2:
    option = st.selectbox(
        'Select parsing method.',
        ('monkey', 'x2d', 'lxml'))

if uploaded_file is not None:
    
    st.write(uploaded_file.name)
    bytes_data = uploaded_file.getvalue()
    st.write(len(bytes_data), "bytes")
    
    saved_file_path = save_uploaded_file(uploaded_file)
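    # Parse the saved PDF with grobidmonkey using the selected method and display the section outline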
    monkeyReader = reader.MonkeyReader(option)
    outline = monkeyReader.readOutline(saved_file_path)
    for pre, fill, node in outline:
        st.write("%s%s" % (pre, node.name))
    
    
    # read paper content
    essay = monkeyReader.readEssay(saved_file_path)
    for key, values in essay.items():
        st.write(f"{key}: {', '.join(values)}")

    Barttokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
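    # Load the paper2slides summarizer and expander BART checkpoints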
    summ_model_path = 'com3dian/Bart-large-paper2slides-summarizer'
    summarizor = BartForConditionalGeneration.from_pretrained(summ_model_path)
    exp_model_path = 'com3dian/Bart-large-paper2slides-expander'
    expandor = BartForConditionalGeneration.from_pretrained(exp_model_path)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    BartSE = BARTAutoEncoder(summarizor, expandor, device)
    del summarizor, expandor
        
    document = Document(essay, Barttokenizer)
    del Barttokenizer
    length = document.merge(10, 30, BartSE, device)
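
    # Summarize each merged segment with the paper2slides summarization pipeline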

    summarizor = pipeline("summarization", model=summ_model_path, device=0 if torch.cuda.is_available() else -1)
    summ_text = summarizor(document.segmentation['text'], max_length=100, min_length=10, do_sample=False)
    summ_text = [text['summary_text'] for text in summ_text]