simonraj committed on
Commit
d4704ef
·
verified ·
1 Parent(s): 7d501a5

Update app.py

Browse files

system prompt updated

Files changed (1) hide show
  1. app.py +12 -3
app.py CHANGED
@@ -59,14 +59,14 @@ def bot_streaming(message, history):
59
  conversation.extend([{"role": "user", "content": ""}])
60
  continue
61
  if flag == True:
62
- conversation[0]['content'] = f"<|image_1|>\n{user}"
63
  conversation.extend([{"role": "assistant", "content": assistant}])
64
  flag = False
65
  continue
66
  conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
67
 
68
  if len(history) == 0:
69
- conversation.append({"role": "user", "content": f"<|image_1|>\n{message['text']}"})
70
  else:
71
  conversation.append({"role": "user", "content": message['text']})
72
  print(f"prompt is -\n{conversation}")
@@ -74,10 +74,18 @@ def bot_streaming(message, history):
74
  image = Image.open(image)
75
  inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
76
 
 
 
 
 
 
 
 
 
77
  streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True, "skip_prompt": True, 'clean_up_tokenization_spaces': False,})
78
  generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=280, do_sample=False, temperature=0.0, eos_token_id=processor.tokenizer.eos_token_id,)
79
 
80
- thread = Thread(target=model.generate, kwargs=generation_kwargs)
81
  thread.start()
82
 
83
  buffer = ""
@@ -85,6 +93,7 @@ def bot_streaming(message, history):
85
  buffer += new_text
86
  yield buffer
87
 
 
88
  chatbot = gr.Chatbot(scale=1, placeholder=PLACEHOLDER)
89
  chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
90
  with gr.Blocks(fill_height=True,) as demo:
 
59
  conversation.extend([{"role": "user", "content": ""}])
60
  continue
61
  if flag == True:
62
+ conversation[0]['content'] = f"\n{user}"
63
  conversation.extend([{"role": "assistant", "content": assistant}])
64
  flag = False
65
  continue
66
  conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
67
 
68
  if len(history) == 0:
69
+ conversation.append({"role": "user", "content": f"\n{message['text']}"})
70
  else:
71
  conversation.append({"role": "user", "content": message['text']})
72
  print(f"prompt is -\n{conversation}")
 
74
  image = Image.open(image)
75
  inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
76
 
77
+ # Custom prompt to ensure responses are in Arnold's style
78
+ system_prompt = (
79
+ "As Arnold Schwarzenegger, analyze the image to identify the exercise being performed. "
80
+ "Provide detailed coaching tips to improve the form, focusing on posture and common errors. "
81
+ "Use motivational and energetic language. If the image does not show an exercise, respond with: "
82
+ "'What are you doing? This is no time for games! Upload a real exercise picture and let's pump it up!'"
83
+ )
84
+
85
  streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True, "skip_prompt": True, 'clean_up_tokenization_spaces': False,})
86
  generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=280, do_sample=False, temperature=0.0, eos_token_id=processor.tokenizer.eos_token_id,)
87
 
88
+ thread = Thread(target=model.generate, kwargs=generation_kwargs, args=(system_prompt,))
89
  thread.start()
90
 
91
  buffer = ""
 
93
  buffer += new_text
94
  yield buffer
95
 
96
+
97
  chatbot = gr.Chatbot(scale=1, placeholder=PLACEHOLDER)
98
  chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
99
  with gr.Blocks(fill_height=True,) as demo: