Update src/pdfchatbot.py

src/pdfchatbot.py CHANGED (+9 -11)
@@ -3,10 +3,10 @@ import fitz
 import torch
 import gradio as gr
 from PIL import Image
-from
-from
+from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.vectorstores import Chroma
 from langchain.chains import ConversationalRetrievalChain
-from
+from langchain.document_loaders import PyPDFLoader
 from langchain.prompts import PromptTemplate
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import spaces
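The three imports filled in here cover the retrieval side of the bot: PyPDFLoader reads the uploaded PDF, HuggingFaceEmbeddings vectorises its chunks, and Chroma indexes them for similarity search. A minimal sketch of how they typically compose, assuming an illustrative file path and embedding model (neither appears in this commit):

    from langchain.document_loaders import PyPDFLoader
    from langchain.embeddings import HuggingFaceEmbeddings
    from langchain.vectorstores import Chroma

    # Load the PDF as one Document per page.
    documents = PyPDFLoader("sample.pdf").load()

    # Embed the pages with a sentence-transformers model and index them in Chroma.
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vectordb = Chroma.from_documents(documents, embedding=embeddings)

    # Fetch the chunks most similar to a query.
    hits = vectordb.similarity_search("What is this document about?", k=2)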
@@ -17,7 +17,6 @@ class PDFChatBot:
     def __init__(self, config_path="config.yaml"):
         """
         Initialize the PDFChatBot instance.
-
         Parameters:
             config_path (str): Path to the configuration file (default is "../config.yaml").
         """
@@ -33,9 +32,9 @@ class PDFChatBot:
         self.model = None
         self.pipeline = None
         self.chain = None
-        self.chunk_size =
+        self.chunk_size = 512
         self.overlap_percentage = 50
-        self.max_chunks_in_context =
+        self.max_chunks_in_context = 2
         self.current_context = None
         self.model_temperatue = 0.5
         self.format_seperator="""\n\n--\n\n"""
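chunk_size = 512 with overlap_percentage = 50 means consecutive chunks share half their length, so a sentence cut at a boundary still appears whole in the neighbouring chunk, and max_chunks_in_context = 2 caps retrieval at two chunks of context per question. How these numbers would typically feed a splitter (the splitter class itself is an assumption; the commit only sets the values):

    from langchain.text_splitter import RecursiveCharacterTextSplitter

    chunk_size = 512
    overlap_percentage = 50

    # 50% of 512: each chunk overlaps its neighbour by 256 characters.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_size * overlap_percentage // 100,
    )
    chunks = splitter.split_documents(documents)  # documents from the sketch above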
@@ -60,13 +59,13 @@ class PDFChatBot:
         print("Vector store created")
     @spaces.GPU
     def load_tokenizer(self):
-        self.tokenizer = AutoTokenizer.from_pretrained("
+        self.tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

     @spaces.GPU
     def create_organic_pipeline(self):
         self.pipe = pipeline(
             "text-generation",
-            model="
+            model="meta-llama/Meta-Llama-3-8B-Instruct",
             model_kwargs={"torch_dtype": torch.bfloat16},
             device="cuda",
         )
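Meta-Llama-3-8B-Instruct is a gated chat model (access must be granted on the Hub), and it expects prompts rendered through its chat template rather than raw strings. A hedged sketch of the prompt-building step, which this diff does not show:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

    messages = [
        {"role": "system", "content": "Answer using only the provided context."},
        {"role": "user", "content": "What does the document conclude?"},
    ]
    # Render the conversation into Llama 3's chat format, ending with the
    # assistant header so generation continues as the model's reply.
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )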
@@ -84,7 +83,7 @@ class PDFChatBot:
         """
         pipe = pipeline(
             "text-generation",
-            model="
+            model="meta-llama/Meta-Llama-3-8B-Instruct",
             model_kwargs={"torch_dtype": torch.bfloat16},
             device="cuda",
         )
@@ -102,7 +101,7 @@ class PDFChatBot:
         temp = 0.1
         outputs = self.pipe(
             prompt,
-            max_new_tokens=
+            max_new_tokens=1024,
             do_sample=True,
             temperature=temp,
             top_p=0.9,
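max_new_tokens=1024 bounds the reply length, while do_sample with temperature 0.1 and top_p 0.9 keeps sampling close to greedy. One safeguard Llama 3 deployments commonly add, not present in this diff, is stopping at the model's end-of-turn token so generation does not run on past the answer:

    # Llama 3 marks the end of an assistant turn with <|eot_id|>.
    terminators = [
        tokenizer.eos_token_id,
        tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]
    # pipe: the text-generation pipeline built in create_organic_pipeline;
    # prompt: the chat-template output from the sketch above.
    outputs = pipe(
        prompt,
        max_new_tokens=1024,
        do_sample=True,
        temperature=0.1,
        top_p=0.9,
        eos_token_id=terminators,
    )
    # The pipeline returns the prompt plus the completion; keep only the reply.
    answer = outputs[0]["generated_text"][len(prompt):]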
@@ -114,7 +113,6 @@ class PDFChatBot:
     def process_file(self, file):
         """
         Process the uploaded PDF file and initialize necessary components: Tokenizer, VectorDB and LLM.
-
         Parameters:
             file (FileStorage): The uploaded PDF file.
         """
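process_file ties the pieces together: tokenizer, vector store, and LLM are initialised per upload. The module imports ConversationalRetrievalChain for the self.chain attribute, and a typical wiring, purely hypothetical since this diff does not show it, looks like:

    from langchain.chains import ConversationalRetrievalChain
    from langchain.llms import HuggingFacePipeline

    # Wrap the transformers pipeline so LangChain can drive it.
    llm = HuggingFacePipeline(pipeline=pipe)

    chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        # k=2 mirrors max_chunks_in_context above.
        retriever=vectordb.as_retriever(search_kwargs={"k": 2}),
    )
    result = chain({"question": "Summarise the PDF.", "chat_history": []})
    print(result["answer"])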