Update app.py
app.py
CHANGED
@@ -618,7 +618,7 @@ def send_document_to_telegram(content_or_path, filename="reponse.txt", caption="
     return False
 
 # --- Background Image Processing ---
-def process_image_background(task_id, image_data):
+def process_image_background(task_id, image_data, resolution_style='colorful'):
     """Traite l'image, génère LaTeX, convertit en PDF (si possible), et envoie via Telegram."""
     pdf_file_to_clean = None
     try:
@@ -629,23 +629,26 @@ def process_image_background(task_id, image_data):
 
         img = Image.open(io.BytesIO(image_data))
         buffered = io.BytesIO()
-        img.save(buffered, format="PNG")
+        img.save(buffered, format="PNG")
         img_base64_str = base64.b64encode(buffered.getvalue()).decode()
 
         full_latex_response = ""
 
         try:
             task_results[task_id]['status'] = 'generating_latex'
-            print(f"Task {task_id}: Génération LaTeX par Gemini...")
+            print(f"Task {task_id}: Génération LaTeX par Gemini (style: {resolution_style})...")
+
+            # Choisir le prompt selon le style
+            if resolution_style == 'light':
+                prompt_to_use = ppmqth_light # Vous devrez créer cette variable
+            else: # colorful par défaut
+                prompt_to_use = ppmqth
 
-            # User's original model: "gemini-2.5-pro-exp-03-25"
-            # Using "gemini-1.5-pro-latest" as a robust alternative. User can change if needed.
-            # The user's original Gemini call structure:
             gemini_response = client.models.generate_content(
                 model="gemini-2.5-flash-preview-04-17",
                 contents=[
                     {'inline_data': {'mime_type': 'image/png', 'data': img_base64_str}},
-
+                    prompt_to_use
             ],)
 
 
@@ -740,7 +743,7 @@ def index():
 def free():
     return render_template('free.html')
 
-
+
 @app.route('/solve', methods=['POST'])
 def solve():
     try:
@@ -751,33 +754,41 @@ def solve():
         if image_file.filename == '':
             return jsonify({'error': 'Aucun fichier sélectionné'}), 400
 
+        # Récupérer le style de résolution
+        resolution_style = request.form.get('style', 'colorful') # 'colorful' par défaut
+        print(f"Style de résolution sélectionné: {resolution_style}")
+
         image_data = image_file.read()
 
-        # Envoyer l'image à Telegram
-        send_to_telegram(image_data, f"Nouvelle image pour résolution (
+        # Envoyer l'image à Telegram avec info du style
+        send_to_telegram(image_data, f"Nouvelle image pour résolution (Style: {resolution_style})")
 
         task_id = str(uuid.uuid4())
         task_results[task_id] = {
            'status': 'pending',
            'response': '',
            'error': None,
-           'time_started': time.time()
+           'time_started': time.time(),
+           'style': resolution_style # Stocker le style pour référence
        }
 
        threading.Thread(
            target=process_image_background,
-           args=(task_id, image_data)
+           args=(task_id, image_data, resolution_style) # Passer le style
        ).start()
 
        return jsonify({
            'task_id': task_id,
-           'status': 'pending'
+           'status': 'pending',
+           'style': resolution_style
        })
 
     except Exception as e:
         print(f"Exception lors de la création de la tâche: {e}")
         return jsonify({'error': f'Une erreur serveur est survenue: {str(e)}'}), 500
 
+
+
 @app.route('/task/<task_id>', methods=['GET'])
 def get_task_status(task_id):
     if task_id not in task_results:
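
Note: the new 'light' branch reads a prompt from ppmqth_light, which the inline comment says still has to be created. A minimal sketch of what that definition could look like next to the existing ppmqth constant, assuming ppmqth is a plain prompt string defined near the top of app.py; the wording below is illustrative only, not the project's actual prompt:

# Hypothetical companion prompt for the 'light' style.
# ppmqth_light is not defined anywhere in this diff, so this text is a placeholder.
ppmqth_light = (
    "Résous l'exercice de l'image et rédige la solution en LaTeX, "
    "dans un style sobre : pas de couleurs ni de décorations, "
    "uniquement les étapes essentielles du raisonnement et le résultat final."
)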
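For reference, one way a client could exercise the new 'style' form field end to end, assuming the upload field is named 'image' (the field name is not visible in this diff) and the app runs on a local development server:

# Illustration only: posts an image with style='light', then polls the task endpoint.
import time
import requests

BASE = "http://localhost:5000"  # assumed local dev server

with open("exercice.png", "rb") as f:
    resp = requests.post(
        f"{BASE}/solve",
        files={"image": f},       # assumed form field name for the image
        data={"style": "light"},  # 'colorful' is the server-side default
    )
resp.raise_for_status()
task = resp.json()
print(task)  # e.g. {'task_id': ..., 'status': 'pending', 'style': 'light'}

# Poll until the background thread has moved past the statuses shown in this diff.
while True:
    status = requests.get(f"{BASE}/task/{task['task_id']}").json()
    if status.get("status") not in ("pending", "generating_latex"):
        break
    time.sleep(2)
print(status)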