import os

# ✅ Ensure FAISS is installed before it is imported below
# (on Hugging Face Spaces, adding faiss-cpu to requirements.txt is the cleaner option)
os.system("pip install faiss-cpu")

import gradio as gr
from huggingface_hub import InferenceClient
import faiss
import numpy as np
import threading  # ✅ Used to run the embeddings step in the background

def log(message):
    print(f"✅ {message}")


# ✅ Step 1: Run Embeddings in a Separate Thread
def run_embeddings():
    log("🚀 Running embeddings script in background...")
    import embeddings  # ✅ Importing embeddings.py runs it as a script
    log("✅ Embeddings process finished.")

embedding_thread = threading.Thread(target=run_embeddings)
embedding_thread.start()  # ✅ Start embedding in background
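
# For context, embeddings.py is assumed to build and save the index roughly
# like this (a hypothetical sketch, not the actual script — the encoder and
# `documents` below are placeholders):
#
#   vectors = encoder.encode(documents).astype("float32")  # shape (n, d)
#   index = faiss.IndexFlatL2(vectors.shape[1])            # exact L2 index
#   index.add(vectors)
#   faiss.write_index(index, "my_embeddings.faiss")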

# ✅ Step 2: Check FAISS index
def check_faiss():
    index_path = "my_embeddings.faiss"  # Ensure the file has a .faiss extension
    if not os.path.exists(index_path):
        return "⚠️ No FAISS index found! Embeddings might still be processing."
    try:
        index = faiss.read_index(index_path)
        num_vectors = index.ntotal
        dim = index.d
        return f"📊 FAISS index contains {num_vectors} vectors.\n✅ Embedding dimension: {dim}"
    except Exception as e:
        return f"❌ ERROR: Failed to load FAISS index - {e}"
log("π Checking FAISS embeddings...")
faiss_status = check_faiss()
log(faiss_status)
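
# A minimal sketch of querying the loaded index (assumes the query vector was
# produced by the same embedding model used to build the index; `query_vector`
# is a placeholder, not something this script computes):
def search_index(index, query_vector, k=5):
    query = np.asarray(query_vector, dtype="float32").reshape(1, -1)
    distances, ids = index.search(query, k)  # FAISS returns (distances, ids)
    return distances[0], ids[0]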

# ✅ Step 3: Initialize Chatbot
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

def respond(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # Stream tokens from the model and yield the partial response so the
    # Gradio UI updates as text arrives.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
            yield response
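
# Quick sanity check outside Gradio (assumes network access to the Inference
# API); prints the growing response as each streamed chunk arrives:
#
#   for partial in respond("Hello!", [], "You are a friendly Chatbot.", 64, 0.7, 0.95):
#       print(partial)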

# ✅ Step 4: Start Chatbot Interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
log("β
All systems go! Launching chatbot...")
if __name__ == "__main__":
demo.launch()
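
# Note: demo.launch() serves the app locally, which is what Hugging Face Spaces
# expects; passing share=True would additionally create a temporary public URL.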