salma-remyx committed on
Commit bbbd1e2 · 1 Parent(s): e19b349

update system prompt

Files changed (1)
app.py +6 -4
app.py CHANGED
@@ -30,9 +30,11 @@ def gpu_inference(image_path: str, prompt: str) -> str:
 
     # Build conversation
     system_msg = (
-        "You are VL-Thinking 🤔, a helpful assistant. "
-        "Think through your reasoning then provide the answer. "
-        "Wrap reasoning in <think>...</think> and final in <answer>...</answer>."
+        "You are a Vision Language Model specialized in interpreting visual data from images.\n"
+        "Your task is to analyze the provided image and respond to queries with concise answers, "
+        "usually a single word, number, or short phrase.\n"
+        "Focus on delivering accurate, succinct answers based on the visual information. Avoid "
+        "additional explanation unless absolutely necessary."
     )
     conversation = [
         {"role": "system", "content": [{"type": "text", "text": system_msg}]},
@@ -96,7 +98,7 @@ def inference_interface(history):
 
 def build_demo():
     with gr.Blocks() as demo:
-        gr.Markdown("# SpaceThinker-Qwen2.5VL-3B Image Prompt Chatbot")
+        gr.Markdown("# SpaceThinker-Qwen2.5VL-3B")
         chatbot = gr.Chatbot([], type="messages", label="Conversation")
         chat_input = gr.MultimodalTextbox(
             interactive=True,
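
For context, a minimal sketch of how a system message like the new one is typically consumed by a Qwen2.5-VL inference path is shown below. The rest of app.py is not visible in this diff, so the model ID, the `run_inference` helper name, and the generation settings here are assumptions for illustration, not the Space's actual code; the flow uses the standard transformers + qwen_vl_utils pattern for Qwen2.5-VL.

```python
# Hypothetical sketch: how the updated system_msg feeds into Qwen2.5-VL generation.
# The checkpoint name and generation settings are assumptions, not taken from app.py.
import torch
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

MODEL_ID = "remyxai/SpaceThinker-Qwen2.5VL-3B"  # assumed checkpoint name

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained(MODEL_ID)


def run_inference(image_path: str, prompt: str) -> str:
    # Same system prompt introduced by this commit.
    system_msg = (
        "You are a Vision Language Model specialized in interpreting visual data from images.\n"
        "Your task is to analyze the provided image and respond to queries with concise answers, "
        "usually a single word, number, or short phrase.\n"
        "Focus on delivering accurate, succinct answers based on the visual information. Avoid "
        "additional explanation unless absolutely necessary."
    )
    conversation = [
        {"role": "system", "content": [{"type": "text", "text": system_msg}]},
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": prompt},
            ],
        },
    ]

    # Render the chat template and collect the image inputs.
    text = processor.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(conversation)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    ).to(model.device)

    # Generate, then strip the prompt tokens before decoding.
    output_ids = model.generate(**inputs, max_new_tokens=256)
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
    return processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]
```

With a prompt geared toward short answers, the decoded output should be a single word, number, or short phrase, which matches the concise style the new system prompt asks for.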