Spaces:
Runtime error
Runtime error
michailroussos
committed on
Commit
·
bc4a9b2
1
Parent(s):
99b9339
more
Browse files
app.py
CHANGED
@@ -16,7 +16,6 @@ FastLanguageModel.for_inference(model) # Enable optimized inference
|
|
16 |
|
17 |
# Define the response function
|
18 |
def respond(message, history, system_message, max_tokens, temperature, top_p):
|
19 |
-
# Extensive debugging print statements
|
20 |
print("\n" + "="*50)
|
21 |
print("===== RESPOND FUNCTION CALLED =====")
|
22 |
print("="*50)
|
@@ -39,17 +38,8 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
|
|
39 |
if history:
|
40 |
print("\n--- Processing Existing History ---")
|
41 |
for entry in history:
|
42 |
-
|
43 |
-
|
44 |
-
if isinstance(entry, dict):
|
45 |
-
messages.append({"role": "user", "content": entry.get('user', '')})
|
46 |
-
messages.append({"role": "assistant", "content": entry.get('assistant', '')})
|
47 |
-
elif isinstance(entry, list) and len(entry) == 2:
|
48 |
-
# Handle case where history might be a list of [user, assistant]
|
49 |
-
messages.append({"role": "user", "content": entry[0]})
|
50 |
-
messages.append({"role": "assistant", "content": entry[1]})
|
51 |
-
else:
|
52 |
-
print(f"WARNING: Unexpected history entry format: {entry}")
|
53 |
|
54 |
# Add the current user message
|
55 |
print("\n--- Adding Current Message ---")
|
@@ -95,18 +85,21 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
|
|
95 |
print("WARNING: System message detected in response")
|
96 |
response = "Hello! How can I assist you today?"
|
97 |
|
98 |
-
# Prepare return history
|
99 |
-
|
100 |
-
|
101 |
-
|
|
|
102 |
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
print(f"User: {entry['user']}")
|
107 |
-
print(f"Assistant: {entry['assistant'][:100]}...") # Truncate long responses
|
108 |
|
109 |
-
|
|
|
|
|
|
|
|
|
110 |
|
111 |
except Exception as gen_error:
|
112 |
print("\n--- GENERATION ERROR ---")
|
|
|
16 |
|
17 |
# Define the response function
|
18 |
def respond(message, history, system_message, max_tokens, temperature, top_p):
|
|
|
19 |
print("\n" + "="*50)
|
20 |
print("===== RESPOND FUNCTION CALLED =====")
|
21 |
print("="*50)
|
|
|
38 |
if history:
|
39 |
print("\n--- Processing Existing History ---")
|
40 |
for entry in history:
|
41 |
+
messages.append({"role": "user", "content": entry[0]})
|
42 |
+
messages.append({"role": "assistant", "content": entry[1]})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
|
44 |
# Add the current user message
|
45 |
print("\n--- Adding Current Message ---")
|
|
|
85 |
print("WARNING: System message detected in response")
|
86 |
response = "Hello! How can I assist you today?"
|
87 |
|
88 |
+
# Prepare return history in OpenAI messages format
|
89 |
+
return_messages = []
|
90 |
+
for entry in (history or []):
|
91 |
+
return_messages.append({"role": "user", "content": entry[0]})
|
92 |
+
return_messages.append({"role": "assistant", "content": entry[1]})
|
93 |
|
94 |
+
# Add current conversation turn
|
95 |
+
return_messages.append({"role": "user", "content": message})
|
96 |
+
return_messages.append({"role": "assistant", "content": response})
|
|
|
|
|
97 |
|
98 |
+
print("\n--- Return Messages ---")
|
99 |
+
for msg in return_messages:
|
100 |
+
print(f"Role: {msg['role']}, Content: {msg['content'][:100]}...")
|
101 |
+
|
102 |
+
return return_messages
|
103 |
|
104 |
except Exception as gen_error:
|
105 |
print("\n--- GENERATION ERROR ---")
|