Update app.py
app.py CHANGED
@@ -6,9 +6,9 @@ import gradio as gr
 # Use GPU if available
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# Base model and adapter paths
+# Base model and adapter paths (updated for Hugging Face repo)
 base_model_name = "microsoft/phi-2"  # Pull from HF Hub directly
-adapter_path = "Shriti09/
+adapter_path = "Shriti09/phi2-qlora-adapter"  # Update with your Hugging Face repo path
 
 print("🔧 Loading base model...")
 base_model = AutoModelForCausalLM.from_pretrained(
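The adapter is now pulled directly from the Hugging Face Hub instead of a local path. The hunk cuts off before the adapter is actually attached, so the following is only a minimal sketch of the usual PEFT pattern, assuming Shriti09/phi2-qlora-adapter is a standard LoRA adapter repo; the dtype and device_map arguments are illustrative, not taken from app.py.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_name = "microsoft/phi-2"
adapter_path = "Shriti09/phi2-qlora-adapter"

tokenizer = AutoTokenizer.from_pretrained(base_model_name)

# Load the base model, then attach the LoRA/QLoRA adapter weights on top of it.
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    torch_dtype=torch.float16,  # illustrative; the exact arguments in app.py are not shown
    device_map="auto",
)
model = PeftModel.from_pretrained(base_model, adapter_path)
model.eval()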
@@ -61,7 +61,8 @@ def chat_fn(message, history):
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown("<h1>🧠 Phi-2 QLoRA Chatbot</h1>")
 
-    chatbot = gr.Chatbot()
+    # Use 'type' parameter to specify message format for gr.Chatbot()
+    chatbot = gr.Chatbot(type="messages")  # Use 'messages' type for structured messages
     message = gr.Textbox(label="Your message:")
     clear = gr.Button("Clear chat")
 
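With type="messages", gr.Chatbot expects OpenAI-style role/content dictionaries instead of [user, bot] pairs. A minimal sketch of a compatible chat_fn is below; generate_reply is a hypothetical stand-in for the Phi-2 generation call in app.py, and the exact return signature depends on how the events are wired, which this hunk does not show.

# History is a list of {"role": ..., "content": ...} dicts when type="messages".
def chat_fn(message, history):
    history = history + [{"role": "user", "content": message}]
    reply = generate_reply(message)  # hypothetical stand-in for the model generation step
    history = history + [{"role": "assistant", "content": reply}]
    return "", history  # clear the textbox, update the chatbot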
@@ -71,5 +72,5 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     clear.click(lambda: [], None, chatbot)
     clear.click(lambda: [], None, state)
 
-# Run
-demo.queue(
+# Run the app without the 'concurrency_count' argument
+demo.queue().launch()
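queue(concurrency_count=...) is not accepted in Gradio 4.x, which is why the launch line now drops the argument. If similar throttling is still wanted, concurrency is configured differently in 4.x; the commit itself just calls demo.queue().launch(), so the lines below are a sketch under that assumption, with illustrative values.

# Gradio 4.x: set a default per-event concurrency on the queue, or a
# per-listener limit on individual events. Values here are illustrative.
demo.queue(default_concurrency_limit=1, max_size=16).launch()

# or per event listener (wiring is illustrative, not taken from app.py):
# message.submit(chat_fn, inputs=[message, state], outputs=[message, chatbot], concurrency_limit=1)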