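"""Minimal RAG chatbot built with Gradio.

Upload a PDF; on each question the PDF is split into chunks, embedded with a
sentence-transformer and stored in an in-memory ChromaDB collection, and the
best-matching chunk is passed as context to google/flan-t5-base to generate
the answer.

Assumed (unpinned) dependencies: gradio, langchain, chromadb, transformers,
sentence-transformers, accelerate, pdfminer.six.
"""
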
import uuid

import chromadb
import gradio as gr
from langchain.document_loaders import PDFMinerLoader
from langchain.text_splitter import CharacterTextSplitter
from sentence_transformers import SentenceTransformer
from transformers import T5ForConditionalGeneration, AutoTokenizer

# Path of the most recently uploaded PDF; set by upload_pdf() and read by run_query().
file_name = None

# Generator model (device_map='auto' requires the accelerate package).
model_name = 'google/flan-t5-base'
model = T5ForConditionalGeneration.from_pretrained(model_name, device_map='auto', offload_folder="offload")
tokenizer = AutoTokenizer.from_pretrained(model_name)
print('flan-t5 loaded')


# Embedding model used for both document chunks and queries.
ST_name = 'sentence-transformers/sentence-t5-base'
st_model = SentenceTransformer(ST_name)
print('sentence-transformer loaded')


def get_context(query_text, collection):
    """Return the document chunk most similar to the query."""
    query_emb = st_model.encode(query_text)
    query_response = collection.query(query_embeddings=query_emb.tolist(), n_results=4)
    # Keep only the single best-matching chunk so the prompt stays short for flan-t5-base.
    context = query_response['documents'][0][0]
    context = context.replace('\n', ' ').replace('  ', ' ')
    return context

def local_query(query, context):
    """Answer the question with flan-t5, using the retrieved context."""
    t5query = """Using the available context, please answer the question.
    If you aren't sure, please say "I don't know".
    Context: {}
    Question: {}
    """.format(context, query)

    # Truncate to the model's maximum input length to avoid overly long prompts.
    inputs = tokenizer(t5query, return_tensors="pt", truncation=True)
    outputs = model.generate(**inputs, max_new_tokens=20)

    # batch_decode returns a list with one entry per sequence; return the single answer.
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]

def run_query(history, query):
    """Index the uploaded PDF, retrieve context and answer the question."""

    # The PDF path comes from the module-level file_name set by the upload event.
    if file_name is None:
        return history + [(query, "Please upload a PDF first.")], ""

    loader = PDFMinerLoader(file_name)
    doc = loader.load()

    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(doc)

    texts = [i.page_content for i in texts]

    doc_emb = st_model.encode(texts)
    doc_emb = doc_emb.tolist()

    ids = [str(uuid.uuid1()) for _ in doc_emb]

    client = chromadb.Client()
    # Drop any collection left over from a previous question so chunks are not
    # duplicated and create_collection does not fail on an existing name.
    try:
        client.delete_collection("test_db")
    except Exception:
        pass
    collection = client.create_collection("test_db")

    collection.add(
        embeddings=doc_emb,
        documents=texts,
        ids=ids
    )

    context = get_context(query, collection)
    result = local_query(query, context)

    # list.append() returns None; build the (question, answer) pair explicitly
    # and clear the input textbox.
    history = history + [(query, result)]
    return history, ""


# def load_document(pdf_filename):

    
#     loader = PDFMinerLoader(pdf_filename)
#     doc = loader.load()

#     text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
#     texts = text_splitter.split_documents(doc)

#     texts = [i.page_content for i in texts]

#     doc_emb = st_model.encode(texts)
#     doc_emb = doc_emb.tolist()

#     ids = [str(uuid.uuid1()) for _ in doc_emb]

#     client = chromadb.Client()
#     collection = client.create_collection("test_db") 
    
#     collection.add(
#         embeddings=doc_emb,
#         documents=texts,
#         ids=ids
#     )

#     return 'Success'




def upload_pdf(file):
    """Remember the path of the uploaded PDF so run_query() can index it."""
    global file_name
    try:
        # Check that a file was actually uploaded before accessing its attributes.
        if file is not None:
            # Gradio saves the upload to a temporary location; keep its path.
            file_name = file.name

            # message = load_document(file_name)
            return 'Successfully uploaded!'
        else:
            return "No file uploaded."

    except Exception as e:
        return f"An error occurred: {e}"



    
 
with gr.Blocks() as demo:

    btn = gr.UploadButton("Upload a PDF", file_types=[".pdf"])
    output = gr.Textbox(label="Output Box")
    chatbot = gr.Chatbot(value=[], elem_id="chatbot")

    with gr.Row():
        with gr.Column(scale=0.70):
            txt = gr.Textbox(
                show_label=False,
                placeholder="Enter a question",
            )

    # Event handlers: remember the PDF path on upload, answer questions on submit.
    btn.upload(fn=upload_pdf, inputs=[btn], outputs=[output])
    txt.submit(run_query, [chatbot, txt], [chatbot, txt])


demo.launch()

 
# iface = gr.Interface(
#     fn=upload_pdf,
#     inputs="file",
#     outputs="text",
#     title="PDF File Uploader",
#     description="Upload a PDF file and get its filename.",
# )