michailroussos committed on
Commit
35ddf38
·
1 Parent(s): 1f11a55
Files changed (1) hide show
  1. app.py +15 -18
app.py CHANGED
@@ -32,14 +32,18 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
32
  print(f"History Type: {type(history)}")
33
  print(f"History Content: {history}")
34
 
35
- # Prepare the messages for the model
36
- messages = []
 
37
  try:
38
  if history:
39
  print("\n--- Processing Existing History ---")
40
  for entry in history:
41
- messages.append({"role": "user", "content": entry[0]})
42
- messages.append({"role": "assistant", "content": entry[1]})
 
 
 
43
 
44
  # Add the current user message
45
  print("\n--- Adding Current Message ---")
@@ -48,7 +52,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
48
  # Debug messages before tokenization
49
  print("\n--- Messages Before Tokenization ---")
50
  for msg in messages:
51
- print(f"Role: {msg['role']}, Content: {msg['content']}")
52
 
53
  # Tokenize the input
54
  print("\n--- Tokenizing Input ---")
@@ -80,20 +84,13 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
80
  print("\n--- Generated Response ---")
81
  print(f"Raw Response: {response}")
82
 
83
- # Check and filter response
84
- #if "system" in response.lower():
85
- # print("WARNING: System message detected in response")
86
- # response = "Hello! How can I assist you today?"
87
-
88
  # Prepare return history in OpenAI messages format
89
- return_messages = []
90
- for entry in (history or []):
91
- return_messages.append({"role": "user", "content": entry[0]})
92
- return_messages.append({"role": "assistant", "content": entry[1]})
93
 
94
- # Add current conversation turn
95
- return_messages.append({"role": "user", "content": message})
96
- return_messages.append({"role": "assistant", "content": response})
97
 
98
  print("\n--- Return Messages ---")
99
  for msg in return_messages:
@@ -124,4 +121,4 @@ demo = gr.ChatInterface(
124
  )
125
 
126
  if __name__ == "__main__":
127
- demo.launch(share=False) # Use share=False for local testing
 
32
  print(f"History Type: {type(history)}")
33
  print(f"History Content: {history}")
34
 
35
+ # Ensure history is formatted as a list of dictionaries
36
+ messages = [{"role": "system", "content": system_message}] # Add system message at the start
37
+
38
  try:
39
  if history:
40
  print("\n--- Processing Existing History ---")
41
  for entry in history:
42
+ # Ensure each history entry is in the correct format
43
+ if isinstance(entry, dict) and 'role' in entry and 'content' in entry:
44
+ messages.append(entry)
45
+ else:
46
+ print(f"Skipping malformed history entry: {entry}")
47
 
48
  # Add the current user message
49
  print("\n--- Adding Current Message ---")
 
52
  # Debug messages before tokenization
53
  print("\n--- Messages Before Tokenization ---")
54
  for msg in messages:
55
+ print(f"Role: {msg['role']}, Content: {msg['content'][:100]}...")
56
 
57
  # Tokenize the input
58
  print("\n--- Tokenizing Input ---")
 
84
  print("\n--- Generated Response ---")
85
  print(f"Raw Response: {response}")
86
 
 
 
 
 
 
87
  # Prepare return history in OpenAI messages format
88
+ return_messages = [{"role": "user", "content": message},
89
+ {"role": "assistant", "content": response}]
 
 
90
 
91
+ # Add previous conversation turns if any
92
+ for entry in (history or []):
93
+ return_messages.insert(0, {"role": entry['role'], "content": entry['content']})
94
 
95
  print("\n--- Return Messages ---")
96
  for msg in return_messages:
 
121
  )
122
 
123
  if __name__ == "__main__":
124
+ demo.launch(share=False) # Use share=False for local testing