Update app.py
--- a/app.py
+++ b/app.py
@@ -58,6 +58,27 @@ class ModelManager:
         inputs = self.current_tokenizer(prompt, return_tensors="pt").to(self.device)
         return inputs
 
+
+# Initialize model manager
+model_manager = ModelManager()
+
+# Default system message for JSON output
+default_system_message = """You are a helpful AI assistant. You must ALWAYS return your response in valid JSON format.
+Each response should be formatted as follows:
+{
+    "response": {
+        "main_answer": "Your primary response here",
+        "additional_details": "Any additional information or context",
+        "confidence": 0.0 to 1.0,
+        "tags": ["relevant", "tags", "here"]
+    },
+    "metadata": {
+        "response_type": "type of response",
+        "source": "basis of response if applicable"
+    }
+}
+Ensure EVERY response strictly follows this JSON structure."""
+
 @spaces.GPU
 def generate_response(model_name, system_instruction, user_input):
     """Generate response with GPU support and JSON formatting"""