Update app.py
app.py CHANGED
@@ -146,8 +146,18 @@ def user_input(user_question, api_key):
     model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
     tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
 
+    # Define the prompt template
+    prompt_template = f"""
+    Transform the following response into a more conversational tone without adding new information:
+
+    Response:
+    {response_gemini["output_text"]}
+
+    Transformed Response:
+    """
+
     # Tokenize the prompt
-    inputs = tokenizer(prompt_template
+    inputs = tokenizer(prompt_template, return_tensors="pt", max_length=100, truncation=True)
 
     # Generate the transformed response using the Hugging Face model
     outputs = model.generate(**inputs)
@@ -156,10 +166,8 @@ def user_input(user_question, api_key):
     transformed_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
     # Display the transformed response
-    st.write("Reply: ",
+    st.write("Reply: ", transformed_response)
 
-    # Update chat history
-    update_chat_history(user_question, transformed_response)
 
 
 
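For context, here is a minimal, self-contained sketch of the flow this commit produces, with the new prompt-template step in place. The wrapper function, the placeholder model name, and the max_new_tokens setting are illustrative assumptions; the commit itself only defines the pieces shown in the diff, inside user_input.

# A minimal sketch of the updated flow, not the repo's exact code.
# Assumptions: the "gpt2" model name is a placeholder, response_gemini is a
# dict carrying the earlier Gemini answer under "output_text", and the
# rephrase_response wrapper is hypothetical (the commit works inside user_input).
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

def rephrase_response(response_gemini, model_name_or_path="gpt2"):
    model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)

    # Define the prompt template around the earlier Gemini output
    prompt_template = f"""
    Transform the following response into a more conversational tone without adding new information:

    Response:
    {response_gemini["output_text"]}

    Transformed Response:
    """

    # Tokenize the prompt; truncation=True with max_length=100 clips the
    # *input* to 100 tokens before the model ever sees it
    inputs = tokenizer(prompt_template, return_tensors="pt", max_length=100, truncation=True)

    # Generate; max_new_tokens (an assumption, not in the diff) bounds the
    # continuation, since generate() otherwise uses a short default budget
    outputs = model.generate(**inputs, max_new_tokens=100)

    # For a causal LM, the decoded sequence includes the prompt itself
    transformed_response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Display the transformed response
    st.write("Reply: ", transformed_response)
    return transformed_response

Two caveats worth noting about the committed code: max_length=100 with truncation=True clips the input prompt, so a long Gemini answer may be cut off mid-sentence, and because the model is causal, the decoded output begins with the prompt template itself, so the displayed reply will repeat it unless the prompt tokens are sliced off first.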