import os
import gradio as gr
from langchain_community.vectorstores import Chroma
from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration, pipeline
from langchain_community.embeddings import SentenceTransformerEmbeddings, HuggingFaceInstructEmbeddings
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFacePipeline

# Constants
ANTI_BOT_PW = os.getenv("CORRECT_VALIDATE")
PATH_WORK = "."
CHROMA_DIR = "/chroma/kkg"
CHROMA_PDF = "./chroma/kkg/pdf"
CHROMA_WORD = "./chroma/kkg/word"
CHROMA_EXCEL = "./chroma/kkg/excel"

# Read the Hugging Face token from the environment
hf_token = os.getenv("HF_READ")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token

# Initialize the Sentence-BERT model for the embeddings, via the LangChain
# wrapper so Chroma gets the embed_documents/embed_query interface it expects
embedding_model = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")

# Initialize tokenizer, retriever, and RAG model with the token;
# the retriever must be attached to the model so generation can use it
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq", token=hf_token)
retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", token=hf_token, use_dummy_dataset=True)
model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", token=hf_token, retriever=retriever)

# Connect to the Chroma DB that holds the indexed documents
chroma_db = Chroma(embedding_function=embedding_model, persist_directory=PATH_WORK + CHROMA_DIR)

# Build a HuggingFacePipeline LLM; pipeline() accepts no retriever argument,
# the retriever is already attached to the RAG model above
llm_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
llm = HuggingFacePipeline(pipeline=llm_pipeline)
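
# Optional smoke test for the wrapped LLM (a sketch, left commented out because
# the RAG checkpoint download is large; the prompt is only an illustrative example):
#print(llm("What is the capital of France?"))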


# Build a custom retriever from the Chroma DB and its embeddings
#retriever = chroma_db.as_retriever()

# Build the RAG chain with the custom retriever
#rag_chain = RagChain(model=model, retriever=retriever, tokenizer=tokenizer, vectorstore=chroma_db)
#############################################


def document_retrieval_chroma2():
    # HF embeddings -----------------------------------
    # Alternative embeddings for the vector store to produce similarity vectors;
    # the ...InstructEmbeddings variant is computationally expensive
    embeddings = HuggingFaceInstructEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"})
    # somewhat less expensive:
    #embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2", model_kwargs={"device": "cpu"}, encode_kwargs={'normalize_embeddings': False})
    # or simply without LangChain:
    #embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")

    # Chroma DB to store the embeddings
    db = Chroma(embedding_function=embeddings, persist_directory=PATH_WORK + CHROMA_DIR)
    print("Chroma DB ready ...................")

    return db
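
# Ingestion sketch (assumptions: PDF files live under CHROMA_PDF, and
# langchain_community's PyPDFLoader is available, which needs pypdf installed).
# build_chroma_from_pdfs is a hypothetical helper, not called anywhere by
# default; it only shows how the persisted store above could be populated.
def build_chroma_from_pdfs():
    from langchain_community.document_loaders import PyPDFLoader
    db = document_retrieval_chroma2()
    for name in os.listdir(CHROMA_PDF):
        if name.lower().endswith(".pdf"):
            # Each PDF page becomes one Document; Chroma embeds and persists it
            db.add_documents(PyPDFLoader(os.path.join(CHROMA_PDF, name)).load())
    return db
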
def get_rag_response(question):
    # Fetch the most relevant documents from the Chroma DB
    docs = chroma_db.similarity_search(question, k=5)
    passages = [doc.page_content for doc in docs]
    links = [doc.metadata.get('url', 'No URL available') for doc in docs]

    # Generate the answer
    prompt_template = PromptTemplate(input_variables=["context", "question"], template="{context}\n\n{question}")
    prompt = prompt_template.format(context=" ".join(passages), question=question)
    answer = llm(prompt)

    # Assemble the output
    response = {
        "answer": answer,
        "documents": [{"link": link, "passage": passage} for link, passage in zip(links, passages)]
    }

    return response
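
# Expected shape of the result (illustrative only; the question is an example):
#   result = get_rag_response("What are the school's office hours?")
#   result["answer"]    -> generated answer string
#   result["documents"] -> [{"link": ..., "passage": ...}, ...]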


# Function used by the chatbot
def chatbot_response(chat_history):
    # user() below has already appended [user_input, None] to the history and
    # cleared the textbox, so answer the pending last turn instead of reading
    # the (now empty) textbox
    question = chat_history[-1][0]
    response = get_rag_response(question)
    answer = response['answer']
    documents = response['documents']

    doc_links = "\n\n".join([f"Link: {doc['link']}\nPassage: {doc['passage']}" for doc in documents])

    bot_response = f"{answer}\n\nRelevant Documents:\n{doc_links}"

    chat_history[-1][1] = bot_response
    return chat_history
#############################
# GUI.........
def user(user_input, history):
    # Clear the textbox and append the new user turn (bot reply still pending)
    return "", history + [[user_input, None]]

with gr.Blocks() as chatbot:
    chat_interface = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    # Listeners: first append the user turn and clear the textbox,
    # then let the RAG chain fill in the bot reply
    msg.submit(user, [msg, chat_interface], [msg, chat_interface], queue=False).then(
        chatbot_response, chat_interface, chat_interface
    )

    clear.click(lambda: None, None, chat_interface, queue=False)

chatbot.launch()
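
# When serving inside a container or a Hugging Face Space, an explicit bind may
# be needed instead, e.g.: chatbot.launch(server_name="0.0.0.0", server_port=7860)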