eikarna committed · Commit 2aecac8 · Parent(s): d850ee8

Minor Update

app.py CHANGED
@@ -7,7 +7,7 @@ from typing import Dict, Any
 DEFAULT_SYSTEM_PROMPT = """You are a friendly Assistant. Provide clear, accurate, and brief answers.
 Keep responses polite, engaging, and to the point. If unsure, politely suggest alternatives."""
 
-MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-
+MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"  # Directly specify model
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Page configuration
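This hunk pins MODEL_NAME to the 7B distill. A minimal sketch of how such a MODEL_NAME/DEVICE pair is typically consumed, assuming the app loads the model with Hugging Face transformers (the actual loading code is outside this hunk):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical loading step; the app's real loading code is not shown in this diff.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
).to(DEVICE)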
@@ -59,7 +59,8 @@ def configure_sidebar() -> Dict[str, Any]:
 
 def format_prompt(system_message: str, user_input: str) -> str:
     """Format prompt according to model's required template"""
-    return f"""<|begin_of_sentence|>System: {system_message}
+    return f"""<|begin_of_sentence|>System: {system_message}
+<|User|>{user_input}<|Assistant|>"""
 
 def generate_response(prompt: str, settings: Dict[str, Any]) -> str:
     """Generate response using local model"""
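The template now closes the f-string with the <|User|>/<|Assistant|> tags on a second line. A quick standalone check of what format_prompt returns:

def format_prompt(system_message: str, user_input: str) -> str:
    """Format prompt according to model's required template"""
    return f"""<|begin_of_sentence|>System: {system_message}
<|User|>{user_input}<|Assistant|>"""

print(format_prompt("You are a friendly Assistant.", "Hi there"))
# <|begin_of_sentence|>System: You are a friendly Assistant.
# <|User|>Hi there<|Assistant|>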
@@ -76,6 +77,8 @@ def generate_response(prompt: str, settings: Dict[str, Any]) -> str:
 
     response = st.session_state.tokenizer.decode(outputs[0], skip_special_tokens=True)
     response = response.split("\n</think>\n")[0].strip()
+    response = response.replace("<|User|>", "").strip()
+    response = response.replace("<|System|>", "").strip()
     return response.split("<|Assistant|>")[-1].strip()
 
 def handle_chat_interaction(settings: Dict[str, Any]):
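The two new replace() calls scrub stray role tags from the decoded text before the final <|Assistant|> split. A standalone trace of the cleanup chain on a made-up decoded string (not real model output):

raw = "System: You are a friendly Assistant.\n<|User|>Hi there<|Assistant|>Hello! How can I help?"

response = raw.split("\n</think>\n")[0].strip()  # [0] keeps the text before any "</think>" marker
response = response.replace("<|User|>", "").strip()
response = response.replace("<|System|>", "").strip()
print(response.split("<|Assistant|>")[-1].strip())  # -> "Hello! How can I help?"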