# paul_graham_essays_rag / setup_db.py
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
import torch
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma

file_path = "./paul_graham_essays.csv"
model_path = 'intfloat/multilingual-e5-large-instruct'
db_persist_directory = './docs/chroma/'


def load_data():
    """Load the essays CSV; each row's 'text' column becomes one document."""
    loader = CSVLoader(
        file_path=file_path,
        csv_args={
            "delimiter": ",",
            "fieldnames": ['id', 'title', 'date', 'text'],
        },
        source_column='title',
        metadata_columns=['date'],
        content_columns=['text'],
    )
    data = loader.load()
    # With explicit fieldnames, the CSV's own header row is parsed as a normal
    # record, so the first document is dropped.
    return data[1:]


def split_docs(docs):
    """Split documents into overlapping chunks for retrieval."""
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        separators=['\n\n', '\n', r'(?<=\. )', ' ', ''],
        # Treat separators as regexes so the sentence-boundary lookbehind works.
        is_separator_regex=True,
    )
    return splitter.split_documents(docs)


def generate_embeddings():
    """Create the HuggingFace embedding model, using a GPU when available."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model_kwargs = {'device': device}
    encode_kwargs = {'normalize_embeddings': False}
    return HuggingFaceEmbeddings(
        model_name=model_path,
        model_kwargs=model_kwargs,
        encode_kwargs=encode_kwargs,
    )


def get_db():
    """Embed the essay chunks and persist them to a Chroma vector store."""
    splits = split_docs(load_data())
    embedding = generate_embeddings()
    return Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        persist_directory=db_persist_directory,
    )
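

# --- Minimal usage sketch (not in the original file). ---
# The reload helper, the `__main__` guard, and the sample query string below are
# illustrative assumptions showing one way to build the persisted store once and
# query it afterwards; they are not part of the source script.
def load_db():
    """Reopen the persisted Chroma store without re-embedding the corpus."""
    return Chroma(
        persist_directory=db_persist_directory,
        embedding_function=generate_embeddings(),
    )


if __name__ == "__main__":
    # Build the vector store and persist it under db_persist_directory.
    db = get_db()

    # Example retrieval against the freshly built store; query text is illustrative.
    hits = db.similarity_search("What does Paul Graham say about startups?", k=3)
    for doc in hits:
        print(doc.metadata.get("source"), "->", doc.page_content[:80])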