michailroussos committed
Commit bc4a9b2 · 1 Parent(s): 99b9339
Files changed (1)
  app.py +15 -22
app.py CHANGED
@@ -16,7 +16,6 @@ FastLanguageModel.for_inference(model) # Enable optimized inference
 
 # Define the response function
 def respond(message, history, system_message, max_tokens, temperature, top_p):
-    # Extensive debugging print statements
     print("\n" + "="*50)
     print("===== RESPOND FUNCTION CALLED =====")
     print("="*50)
@@ -39,17 +38,8 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
     if history:
         print("\n--- Processing Existing History ---")
         for entry in history:
-            print(f"Processing entry: {entry}")
-            # Ensure entry is a dictionary with 'user' and 'assistant' keys
-            if isinstance(entry, dict):
-                messages.append({"role": "user", "content": entry.get('user', '')})
-                messages.append({"role": "assistant", "content": entry.get('assistant', '')})
-            elif isinstance(entry, list) and len(entry) == 2:
-                # Handle case where history might be a list of [user, assistant]
-                messages.append({"role": "user", "content": entry[0]})
-                messages.append({"role": "assistant", "content": entry[1]})
-            else:
-                print(f"WARNING: Unexpected history entry format: {entry}")
+            messages.append({"role": "user", "content": entry[0]})
+            messages.append({"role": "assistant", "content": entry[1]})
 
     # Add the current user message
     print("\n--- Adding Current Message ---")
@@ -95,18 +85,21 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
             print("WARNING: System message detected in response")
            response = "Hello! How can I assist you today?"
 
-        # Prepare return history
-        return_history = (history or []) + [
-            {"user": message, "assistant": response}
-        ]
+        # Prepare return history in OpenAI messages format
+        return_messages = []
+        for entry in (history or []):
+            return_messages.append({"role": "user", "content": entry[0]})
+            return_messages.append({"role": "assistant", "content": entry[1]})
 
-        print("\n--- Return History ---")
-        print(f"Return History Length: {len(return_history)}")
-        for entry in return_history:
-            print(f"User: {entry['user']}")
-            print(f"Assistant: {entry['assistant'][:100]}...")  # Truncate long responses
+        # Add current conversation turn
+        return_messages.append({"role": "user", "content": message})
+        return_messages.append({"role": "assistant", "content": response})
 
-        return return_history
+        print("\n--- Return Messages ---")
+        for msg in return_messages:
+            print(f"Role: {msg['role']}, Content: {msg['content'][:100]}...")
+
+        return return_messages
 
     except Exception as gen_error:
         print("\n--- GENERATION ERROR ---")