#########################################################################################
# Title: Gradio Interface to LLM-chatbot with RAG-functionality and ChromaDB on HF-Hub
# Author: Andreas Fischer
# Date: December 29th, 2023
# Last update: December 31st, 2023
#########################################################################################
# Chroma-DB
#-----------
import os
import chromadb

# Use the local development path if it exists, otherwise the path inside the HF Space
dbPath = "/home/af/Schreibtisch/gradio/Chroma/db"
if not os.path.exists(dbPath):
    dbPath = "/home/user/app/db"
print(dbPath)

#client = chromadb.Client()  # in-memory client (not persistent)
client = chromadb.PersistentClient(path=dbPath)
print(client.heartbeat())
print(client.get_version())
print(client.list_collections())
from chromadb.utils import embedding_functions
default_ef = embedding_functions.DefaultEmbeddingFunction()
sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(
    model_name="T-Systems-onsite/cross-en-de-roberta-sentence-transformer"
)
#instructor_ef = embedding_functions.InstructorEmbeddingFunction(model_name="hkunlp/instructor-large", device="cuda")

# Load the collection if it already exists, otherwise create and seed it
print(str(client.list_collections()))
if "name=ChromaDB1" in str(client.list_collections()):
    print("ChromaDB1 found!")
    collection = client.get_collection(name="ChromaDB1", embedding_function=sentence_transformer_ef)
else:
    print("ChromaDB1 created!")
    collection = client.create_collection(
        "ChromaDB1",
        embedding_function=sentence_transformer_ef,
        metadata={"hnsw:space": "cosine"})  # use cosine distance for similarity search
    collection.add(
        documents=["The meaning of life is to love.", "This is a sentence", "This is a sentence too"],
        metadatas=[{"source": "notion"}, {"source": "google-docs"}, {"source": "google-docs"}],
        ids=["doc1", "doc2", "doc3"],
    )
print("Database ready!")
print(collection.count())
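# A quick sanity check (illustrative addition, not part of the original app):
# a query returns parallel lists of documents, metadatas and cosine distances,
# which is exactly the structure the response() function below relies on. The
# variable name "test" is hypothetical; kept commented out to avoid extra work
# at startup:
#test = collection.query(query_texts=["meaning of life"], n_results=1)
#print(test["documents"][0], test["metadatas"][0], test["distances"][0])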
# Model
#-------
from huggingface_hub import InferenceClient
import gradio as gr

# Note: from here on, "client" refers to the InferenceClient; the ChromaDB
# client of the same name is no longer needed once the collection is loaded.
client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
    #"mistralai/Mistral-7B-Instruct-v0.1"
)
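# Illustrative one-off (non-streaming) call, assuming the Space has access to
# the HF Inference API; commented out so the app does not spend a request at
# import time:
#print(client.text_generation("<s>[INST] Wer bist du? [/INST]", max_new_tokens=30))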
# Gradio-GUI
#------------
import json

def format_prompt(message, history):
    prompt = "<s>"
    #for user_prompt, bot_response in history:
    #    prompt += f"[INST] {user_prompt} [/INST]"
    #    prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
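# Quick check of the resulting Mixtral/Mistral instruction format (the
# commented-out loop above would additionally prepend earlier chat turns):
#assert format_prompt("Hallo", []) == "<s>[INST] Hallo [/INST]"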
def response(
    prompt, history, temperature=0.9, max_new_tokens=500, top_p=0.95, repetition_penalty=1.0,
):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Retrieve the two most similar documents from the vector database
    addon = ""
    results = collection.query(
        query_texts=[prompt],
        n_results=2,
        #where={"source": "google-docs"}
        #where_document={"$contains": "search_string"}
    )
    # Convert cosine distances to relevance scores (1 - distance), rounded to two decimals
    dists = ["<small>(relevance: " + str(round((1 - d) * 100) / 100) + ";" for d in results['distances'][0]]
    sources = ["source: " + s["source"] + ")</small>" for s in results['metadatas'][0]]
    results = results['documents'][0]
    combination = zip(results, dists, sources)
    combination = [' '.join(triplet) for triplet in combination]
    print(combination)

    # If documents were retrieved, append them to the system prompt
    # (German: "Please take the following excerpts from our database into account
    # in your answer, if they are relevant to it. Answer the question briefly and
    # precisely. Ignore unsuitable excerpts WITHOUT commenting on, mentioning or
    # listing them.")
    if len(results) > 1:
        addon = " Bitte berücksichtige bei deiner Antwort ggf. folgende Auszüge aus unserer Datenbank, sofern sie für die Antwort erforderlich sind. Beantworte die Frage knapp und präzise. Ignoriere unpassende Datenbank-Auszüge OHNE sie zu kommentieren, zu erwähnen oder aufzulisten:\n" + "\n".join(results)
    # German: "You are an AI-based assistance system." / "User request:"
    system = "Du bist ein KI-basiertes Assistenzsystem." + addon + "\n\nUser-Anliegen:"
    #body={"prompt":system+"### Instruktion:\n"+message+"\n\n### Antwort:","max_tokens":500, "echo":"False","stream":"True"} #e.g. SauerkrautLM

    # Stream the model response token by token
    formatted_prompt = format_prompt(system + "\n" + prompt, history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for chunk in stream:  # loop variable renamed from "response" to avoid shadowing the function name
        output += chunk.token.text
        yield output

    # Append the retrieved sources as a collapsible HTML block
    output = output + "\n\n<br><details open><summary><strong>Sources</strong></summary><br><ul>" + "".join(["<li>" + s + "</li>" for s in combination]) + "</ul></details>"
    yield output
gr.ChatInterface(
    response,
    chatbot=gr.Chatbot(render_markdown=True),
    title="German RAG-Interface to the Hugging Face Hub"
).queue().launch(share=True)  #False, server_name="0.0.0.0", server_port=7864)
print("Interface up and running!")