michailroussos committed on
Commit
37be440
·
1 Parent(s): 9202d9a
Files changed (1) hide show
  1. app.py +7 -4
app.py CHANGED
@@ -22,7 +22,7 @@ print("Model loaded successfully!")
22
  # Gradio Response Function
23
  from transformers import TextStreamer
24
 
25
- def respond(message, max_new_tokens, temperature, system_message=""):
26
  try:
27
  # Prepare input messages
28
  messages = [{"role": "system", "content": system_message}] if system_message else []
@@ -52,10 +52,13 @@ def respond(message, max_new_tokens, temperature, system_message=""):
52
  # Decode the generated tokens back to text
53
  generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
54
 
55
- # Debug: Show the generated text
56
- print("[DEBUG] Generated Text:", generated_text)
57
 
58
- return generated_text
 
 
 
59
 
60
  except Exception as e:
61
  # Debug: Log errors
 
22
  # Gradio Response Function
23
  from transformers import TextStreamer
24
 
25
+ def respond(message, max_new_tokens, temperature, system_message="You are a helpful assistant. You should reply to the user's message without repeating the input."):
26
  try:
27
  # Prepare input messages
28
  messages = [{"role": "system", "content": system_message}] if system_message else []
 
52
  # Decode the generated tokens back to text
53
  generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
54
 
55
+ # Clean up the response by removing unwanted parts (e.g., system and user info)
56
+ cleaned_response = generated_text.split("\n")[-1] # Assuming the response ends at the last line
57
 
58
+ # Debug: Show the cleaned response
59
+ print("[DEBUG] Cleaned Response:", cleaned_response)
60
+
61
+ return cleaned_response
62
 
63
  except Exception as e:
64
  # Debug: Log errors