File size: 13,613 Bytes
949f8bc
e7761b5
51cbadd
949f8bc
f388c93
949f8bc
f388c93
949f8bc
 
f388c93
 
 
 
6b10944
949f8bc
12d4886
f388c93
 
12d4886
f388c93
949f8bc
f388c93
 
 
 
e7761b5
 
949f8bc
 
 
e7761b5
 
f388c93
 
e7761b5
 
f388c93
2ef19ee
c2c3e4e
e7761b5
2ef19ee
 
a0afec0
f388c93
 
 
e7761b5
 
 
51cbadd
e7761b5
 
 
 
 
 
 
f388c93
 
 
949f8bc
f388c93
51cbadd
e79be93
51cbadd
e79be93
e7761b5
6b10944
 
e7761b5
12d4886
 
01e07c4
 
12d4886
 
 
 
 
 
 
6b10944
12d4886
e79be93
e7761b5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
949f8bc
e7761b5
 
 
 
 
 
 
 
 
 
 
 
 
51cbadd
e7761b5
61f5a5c
51cbadd
6b10944
 
 
 
 
 
 
 
f388c93
 
949f8bc
e7761b5
f388c93
e7761b5
949f8bc
2ef19ee
93f4a81
2ef19ee
e7761b5
 
 
2ef19ee
e7761b5
 
2ef19ee
e7761b5
 
 
 
 
949f8bc
2ef19ee
949f8bc
2ef19ee
e7761b5
2ef19ee
e7761b5
949f8bc
e7761b5
 
 
 
2ef19ee
e7761b5
 
 
 
 
 
 
 
2ef19ee
 
e7761b5
 
 
 
 
 
 
 
949f8bc
e7761b5
 
 
949f8bc
 
 
 
 
 
 
 
e7761b5
949f8bc
 
e7761b5
949f8bc
 
 
 
 
 
 
 
 
 
 
 
 
 
e7761b5
 
 
 
 
949f8bc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0afec0
2ef19ee
949f8bc
 
 
e7761b5
 
949f8bc
e7761b5
2ef19ee
f388c93
e7761b5
 
 
 
949f8bc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
# --- START OF CORRECTED app.py (v3 - Fixes AttributeError) ---

from flask import Flask, render_template, request, jsonify, Response, stream_with_context
# Revert to the original google.genai import and usage
from google import genai
# Make sure types is imported from google.genai if needed for specific model config
from google.genai import types
# Correct import for GoogleAPIError with the original genai client
from google.api_core.exceptions import GoogleAPIError # <-- IMPORTATION CORRIGÉE
import os
from PIL import Image
import io
import base64
import json
import re

app = Flask(__name__)

# API key comes from the environment; never hard-code credentials.
GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")

# FIX: warn about a missing key BEFORE constructing the client, so the
# root cause appears in the logs ahead of any downstream auth failure.
# (The original printed this warning only after client initialization.)
if not GOOGLE_API_KEY:
    print("WARNING: GEMINI_API_KEY environment variable not set.")
    # NOTE(review): requests will still fail later with an auth error;
    # in production consider raising at startup instead of just printing.

# Single shared GenAI client, reused by every route.
client = genai.Client(
    api_key=GOOGLE_API_KEY,
)
# --- Routes for index and potentially the Pro version (kept for context) ---
@app.route('/')
def index():
    """Serve the landing page (Pro version UI)."""
    # 'index.html' is the Pro-version page; the free tier lives at /free.
    return render_template('index.html')

@app.route('/free')
def indexx():
    """Serve the free-version page."""
    template_name = 'maj.html'  # free-tier UI template
    return render_template(template_name)

# --- Original /solve route (Pro version, streaming) - Kept as is ---
@app.route('/solve', methods=['POST'])
def solve():
    """Pro-version solver: streams the model's answer as Server-Sent Events.

    Expects a multipart upload under 'image'. The image is re-encoded to
    PNG, base64-encoded, and sent to the model together with a French
    solving prompt. The response is an SSE stream where each event is a
    JSON object carrying either a {"mode": ...} transition ("thinking",
    "executing_code", "code_result", "answering"), a {"content": ...}
    fragment, or an {"error": ...} message.

    Returns:
        A streaming text/event-stream Response, or a JSON error with
        status 400 (bad upload) / 500 (unexpected failure).
    """
    try:
        # Reject missing or empty-named uploads early.
        if 'image' not in request.files or not request.files['image'].filename:
            return jsonify({'error': 'No image file provided'}), 400

        image_data = request.files['image'].read()
        if not image_data:
            return jsonify({'error': 'Empty image file provided'}), 400

        # Validate that the payload is actually a decodable image.
        try:
            img = Image.open(io.BytesIO(image_data))
        except Exception as img_err:
             return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400

        # Re-encode to PNG so the declared mime type always matches the data.
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        def generate():
            """Yield SSE 'data:' lines as model chunks arrive.

            Tracks the current output phase in `mode` and emits a mode
            event only when the phase changes, so the client can switch
            its display without being spammed on every chunk.
            """
            mode = 'starting'
            try:
                response = client.models.generate_content_stream(
                    model="gemini-2.5-pro-exp-03-25", # Pro model used for this route
                    contents=[
                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                        """Résous cet exercice en français avec du LaTeX.
                        Si nécessaire, utilise du code Python pour effectuer les calculs complexes.
                        Présente ta solution de façon claire et espacée."""
                    ],
                    config=types.GenerateContentConfig(
                        thinking_config=types.ThinkingConfig(
                            thinking_budget=8000
                        ),
                        # Enable the model-side Python code-execution tool.
                        tools=[types.Tool(
                            code_execution=types.ToolCodeExecution()
                        )]
                    )
                )

                for chunk in response:
                    if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
                        for part in chunk.candidates[0].content.parts:
                            # "Thought" parts: signal the phase, content is not forwarded.
                            if hasattr(part, 'thought') and part.thought:
                                if mode != "thinking":
                                    yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
                                    mode = "thinking"
                            # Model-generated code: wrap in a fenced python block.
                            elif hasattr(part, 'executable_code') and part.executable_code:
                                if mode != "executing_code":
                                    yield 'data: ' + json.dumps({"mode": "executing_code"}) + '\n\n'
                                    mode = "executing_code"
                                code_block_open = "```python\n"
                                code_block_close = "\n```"
                                yield 'data: ' + json.dumps({"content": code_block_open + part.executable_code.code + code_block_close}) + '\n\n'
                            # Output of the executed code: wrap in a plain fenced block.
                            elif hasattr(part, 'code_execution_result') and part.code_execution_result:
                                if mode != "code_result":
                                    yield 'data: ' + json.dumps({"mode": "code_result"}) + '\n\n'
                                    mode = "code_result"
                                result_block_open = "Résultat d'exécution:\n```\n"
                                result_block_close = "\n```"
                                yield 'data: ' + json.dumps({"content": result_block_open + part.code_execution_result.output + result_block_close}) + '\n\n'
                            else: # Anything else is treated as answer text.
                                if mode != "answering":
                                    yield 'data: ' + json.dumps({"mode": "answering"}) + '\n\n'
                                    mode = "answering"
                                if hasattr(part, 'text') and part.text:
                                    yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
                    # Chunk without usable parts: surface a prompt block, if any.
                    elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
                        error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
                        print(error_msg)
                        yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
                        break # Stop processing on block
                    # Or an early termination (non-STOP finish reasons are reported).
                    elif chunk.candidates and chunk.candidates[0].finish_reason:
                        finish_reason = chunk.candidates[0].finish_reason.name
                        if finish_reason != 'STOP':
                            error_msg = f"Generation finished early: {finish_reason}"
                            print(error_msg)
                            yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
                        break # Stop processing on finish reason

            except Exception as e:
                # Stream errors to the client instead of dying silently;
                # the HTTP response has already started, so no status change is possible.
                print(f"Error during streaming generation: {e}")
                yield 'data: ' + json.dumps({"error": str(e)}) + '\n\n'

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                # Disable nginx response buffering so events flush immediately.
                'X-Accel-Buffering': 'no'
            }
        )

    except Exception as e:
        print(f"Error in /solve endpoint: {e}")
        return jsonify({'error': f'Failed to process request: {str(e)}'}), 500


# --- MODIFIED /solved route (Free version, non-streaming) using original SDK syntax ---
@app.route('/solved', methods=['POST'])
def solved():
    """Free-version solver: single non-streaming request/response.

    Expects a multipart upload under 'image'; sends it to the model with a
    French solving prompt and returns JSON: {'solution': str} on success,
    {'error': str} with an appropriate HTTP status on failure.
    """
    try:
        # Reject missing or empty uploads early.
        if 'image' not in request.files or not request.files['image'].filename:
            return jsonify({'error': 'No image file provided'}), 400

        image_data = request.files['image'].read()
        if not image_data:
            return jsonify({'error': 'Empty image file provided'}), 400

        # Validate that the payload is actually a decodable image.
        try:
            img = Image.open(io.BytesIO(image_data))
        except Exception as img_err:
            return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400

        # Re-encode to PNG so the declared mime type always matches the data.
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        model_name = "gemini-2.5-flash-preview-04-17"  # free-tier model

        contents = [
            {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
            """Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
            Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
            Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
        ]

        response = client.models.generate_content(
            model=model_name,
            contents=contents,
            config=types.GenerateContentConfig(
                # Enable the model-side Python code-execution tool.
                tools=[types.Tool(
                    code_execution=types.ToolCodeExecution()
                )]
            )
        )

        full_solution = ""
        # Assemble text, generated code, and execution results in order.
        if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
            for part in response.candidates[0].content.parts:
                if hasattr(part, 'text') and part.text:
                    full_solution += part.text
                elif hasattr(part, 'executable_code') and part.executable_code:
                    full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
                elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
                    output_str = part.code_execution_result.output
                    full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"

        # BUG FIX: this check used to be nested inside the candidates branch
        # above, so a fully blocked prompt — which typically yields NO
        # candidates at all — silently skipped the block-reason message.
        # Check prompt feedback unconditionally instead.
        if response.prompt_feedback and response.prompt_feedback.block_reason:
            block_reason = response.prompt_feedback.block_reason.name
            if not full_solution.strip():  # nothing else was generated
                full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
            else:  # partial content was generated before the block
                full_solution += f"\n\n**Attention:** La réponse a pu être incomplète car le contenu a été bloqué: {block_reason}."

        # Still empty: explain why via the candidate's finish reason.
        if not full_solution.strip():
            finish_reason = response.candidates[0].finish_reason.name if response.candidates and response.candidates[0].finish_reason else "UNKNOWN"
            print(f"Generation finished with reason (no content): {finish_reason}")
            if finish_reason == 'SAFETY':
                full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
            elif finish_reason == 'RECITATION':
                full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
            # Fallback for OTHER/UNKNOWN and for a clean STOP with no content.
            if not full_solution.strip():
                full_solution = "Désolé, je n'ai pas pu générer de solution complète pour cette image."

        return jsonify({'solution': full_solution.strip()})

    # Specific API failures get user-friendly French messages.
    except GoogleAPIError as api_error:
        print(f"GenAI API Error: {api_error}")
        error_message = str(api_error)
        if "RESOURCE_EXHAUSTED" in error_message:
            user_error = "Vous avez atteint votre quota d'utilisation de l'API. Veuillez réessayer plus tard ou vérifier votre console Google Cloud."
        elif "400 Bad Request" in error_message or "INVALID_ARGUMENT" in error_message:
            user_error = f"La requête à l'API est invalide : {error_message}. L'image n'a peut-être pas été comprise."
        elif "403 Forbidden" in error_message or "PERMISSION_DENIED" in error_message:
            user_error = "Erreur d'authentification ou de permissions avec l'API. Vérifiez votre clé API."
        elif "50" in error_message:  # catch 5xx server errors
            user_error = f"Erreur serveur de l'API : {error_message}. Veuillez réessayer plus tard."
        else:
            user_error = f'Erreur de l\'API GenAI: {error_message}'

        # FIX: only reuse api_error.code when it is a plausible HTTP status
        # int — the attribute can exist but be None or a non-HTTP value,
        # which would make Flask emit an invalid response.
        status = getattr(api_error, 'code', None)
        if not isinstance(status, int) or not (100 <= status <= 599):
            status = 500
        return jsonify({'error': user_error}), status

    except Exception as e:
        # Log the full traceback for debugging; return a generic message.
        import traceback
        print(f"Error in /solved endpoint: {e}")
        print(traceback.format_exc())
        return jsonify({'error': f'Une erreur interne est survenue lors du traitement: {str(e)}'}), 500


if __name__ == '__main__':
    # Bind to all interfaces so the dev server is reachable on the LAN.
    # debug=True is for development only — do not ship it to production.
    host, port = '0.0.0.0', 5000
    app.run(debug=True, host=host, port=port)

# --- END OF CORRECTED app.py (v3 - Fixes AttributeError) ---