Update app.py
app.py
CHANGED
@@ -76,6 +76,10 @@ def interact(user_input, history, interaction_count):
         if tokenizer is None or model is None:
             raise ValueError("Tokenizer or model is not initialized.")
 
+        # Concatenate a final message if max interactions are reached
+        if interaction_count >= MAX_INTERACTIONS - 1:
+            user_input += " This is the last message. Please respond accordingly."
+
         messages = history + [{"role": "user", "content": user_input}]
 
         # Ensure roles alternate correctly
@@ -88,11 +92,6 @@ def interact(user_input, history, interaction_count):
         # Check if the maximum number of interactions has been reached
         interaction_count += 1
         print(f"Interaction count: {interaction_count}")  # Print the interaction count
-        if interaction_count >= MAX_INTERACTIONS:
-            farewell_message = "Thank you for the conversation! Have a great day!"
-            history.append({"role": "assistant", "content": farewell_message})
-            formatted_history = [(entry["content"], None) if entry["role"] == "user" else (None, entry["content"]) for entry in history if entry["role"] in ["user", "assistant"]]
-            return "", formatted_history, history, interaction_count
 
         # Generate response using selected model
         input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to("cuda")
@@ -106,7 +105,7 @@ def interact(user_input, history, interaction_count):
         formatted_history = [(entry["content"], None) if entry["role"] == "user" else (None, entry["content"]) for entry in history if entry["role"] in ["user", "assistant"]]
         return "", formatted_history, history, interaction_count
     except Exception as e:
-        if torch.cuda.
+        if torch.cuda.is_available():
             torch.cuda.empty_cache()
         print(f"Error during interaction: {e}")
         raise gr.Error(f"An error occurred during interaction: {str(e)}")
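Net effect of the first two hunks: the old code intercepted the final turn itself, appending a hard-coded farewell and returning before the model ever ran; the new code instead tags the last user message one turn early (MAX_INTERACTIONS - 1, because interaction_count is only incremented further down) and lets the model write its own goodbye. A minimal sketch of the new flow, lifted out of the Gradio handler; the constant's value and the wrapper function are illustrative assumptions, not part of the commit:

MAX_INTERACTIONS = 5  # assumed value; the real constant is defined elsewhere in app.py

def nudge_final_turn(user_input: str, interaction_count: int) -> str:
    # Mirror of the added lines: flag the last turn instead of short-circuiting it,
    # so the farewell comes from the model rather than a canned string.
    if interaction_count >= MAX_INTERACTIONS - 1:
        user_input += " This is the last message. Please respond accordingly."
    return user_input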
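The formatted_history comprehension kept in the last hunk reshapes the role-tagged message dicts into the one-sided (user, bot) tuples that gr.Chatbot renders. A self-contained run of that same comprehension, with a made-up two-message history:

history = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello! How can I help?"},
]
formatted_history = [
    (entry["content"], None) if entry["role"] == "user" else (None, entry["content"])
    for entry in history
    if entry["role"] in ["user", "assistant"]
]
print(formatted_history)
# [('Hi', None), (None, 'Hello! How can I help?')]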
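The final hunk replaces a line that appears truncated in the old file ("if torch.cuda.") with a proper torch.cuda.is_available() guard, so the cache cleanup is only attempted when CUDA is actually present. The repaired pattern on its own, wrapped in a hypothetical helper for illustration:

import gradio as gr
import torch

def handle_interaction_error(e: Exception):
    # Free cached GPU memory only when CUDA exists,
    # then surface the failure to the Gradio UI.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    print(f"Error during interaction: {e}")
    raise gr.Error(f"An error occurred during interaction: {str(e)}")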