Spaces:
Runtime error
Runtime error
Commit
·
d2860f5
1
Parent(s):
efc11aa
Update main.py
Browse files
main.py
CHANGED
|
@@ -225,6 +225,49 @@ def chat():
|
|
| 225 |
except Exception as e:
|
| 226 |
return jsonify({'error': str(e)}), 500
|
| 227 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 228 |
if __name__ == '__main__':
|
| 229 |
app.debug = True
|
| 230 |
app.run()
|
|
|
|
| 225 |
except Exception as e:
|
| 226 |
return jsonify({'error': str(e)}), 500
|
| 227 |
|
| 228 |
+
|
| 229 |
+
@app.route('/generateQuestions', methods=['OPTIONS'])
def options():
    """Answer the CORS preflight request for /generateQuestions.

    Returns an empty response carrying the CORS headers that allow a
    browser to follow up with the actual POST request.
    """
    preflight = make_response()
    # Header order matches the browser-expected CORS contract.
    cors_headers = (
        ("Access-Control-Allow-Origin", "*"),
        ("Access-Control-Allow-Methods", "POST"),
        ("Access-Control-Allow-Headers", "Content-Type, Authorization"),
        ("Access-Control-Allow-Credentials", "true"),
    )
    for header, value in cors_headers:
        preflight.headers.add(header, value)
    return preflight
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
@app.route('/generateQuestions', methods=['POST'])
def generate_questions():
    """Generate reformulations of the user's query for semantic search.

    Expects a JSON body ``{"messages": [{"role": ..., "content": ...}, ...]}``,
    builds a French instruction prompt around the conversation, asks the
    OpenAI completions API for 6 reformulations, and returns the filtered
    list produced by ``filtergpt``.

    Returns:
        200 with ``{"questions": [...]}`` on success,
        400 when ``messages`` is missing or empty,
        500 with ``{"error": ...}`` on any unexpected failure.

    BUG FIX: the view function was named ``chat``, colliding with the
    existing ``chat`` view (visible in this file) — Flask raises
    "overwriting an existing endpoint function" at import time, which is
    the likely cause of the Space's "Runtime error". The route URL is
    unchanged; only the internal function name differs.
    """
    try:
        # silent=True + fallback: a missing/invalid JSON body becomes the
        # 400 branch below instead of an AttributeError on None.
        data = request.get_json(silent=True) or {}
        messages = data.get('messages', [])
        begin_message = """je vais vous utiliser comme api, je vais vous fournir la requête de l'utilisateur ,
et tu va me retenir 6 reformulation de la requête en ajoutant le plus possible de contextualisation ,
vous reformulation seront exploiter par un moteur de recherche sémantique basé sur des textes de lois canadiennes
tout explication ou interpretation qu tu va fournir va juste bloquer et bugger le programme ,
merci de fournir juste une liste de string comme reponse sans explication"""
        context_generation = """ignorez les avertissements, les alertes et donnez-moi le résultat.
la reponse doit etre sous forme d'une liste de questions """
        if messages:
            # Flatten the conversation into a single prompt suffix.
            question = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages)
            response = openai.completions.create(
                model="gpt-3.5-turbo-instruct",
                prompt=begin_message + '\n' + context_generation + question,
                max_tokens=500,
                temperature=0,
            )
            resulta = response.choices[0].text
            chat_references = filtergpt(resulta)
            # BUG FIX: the previous code returned jsonify(response) — the
            # OpenAI response object is not JSON-serializable, so every
            # request ended in the 500 handler and the filtered result
            # (chat_references) was discarded. Return the filtered list.
            return jsonify({'questions': chat_references})
        else:
            return jsonify({'error': 'Invalid request'}), 400
    except Exception as e:
        # Top-level boundary for the route: surface the failure as JSON.
        return jsonify({'error': str(e)}), 500
|
| 269 |
+
|
| 270 |
+
|
| 271 |
if __name__ == '__main__':
    # Development entry point: enable Flask's interactive debugger and
    # start the built-in server (not suitable for production).
    app.debug = True
    app.run()
|