# Mariam-cards / app.py
# (The lines that followed — uploader name, "Update app.py", commit hash,
# "raw / history / blame", "2.03 kB" — were HuggingFace Spaces page chrome
# accidentally captured with the file, not code; kept here as a comment so
# the module parses.)
import base64
import io
import json
import os
import time

from flask import Flask, render_template, request, jsonify, Response, stream_with_context
from google import genai
from google.genai import types
from PIL import Image
app = Flask(__name__)

# SECURITY: the API key was hard-coded in source (and therefore committed /
# leaked). Read it from the environment instead; the leaked key must be
# rotated in the Google AI console regardless.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")

client = genai.Client(
    api_key=GOOGLE_API_KEY,
    # Use `v1alpha` so response parts carry the `thought` flag
    # (needed by /solve to separate reasoning from the final answer).
    http_options={'api_version': 'v1alpha'},
)
@app.route('/')
def index():
    """Render and return the app's single-page front end."""
    page = render_template('index.html')
    return page
@app.route('/solve', methods=['POST'])
def solve():
    """Accept an uploaded image and stream the model's solution as SSE.

    Expects a multipart form field ``image``. Returns a ``text/event-stream``
    response where each event's data is a JSON object carrying either a
    ``thought`` key (model reasoning) or an ``answer`` key (final answer),
    or an ``error`` key if generation fails. Non-streaming failures (e.g.
    missing/unreadable upload) return a JSON error with HTTP 500.
    """
    try:
        image_data = request.files['image'].read()
        img = Image.open(io.BytesIO(image_data))

        # Re-encode as PNG so the mime type declared to the API is always
        # accurate regardless of the uploaded format.
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        response_stream = client.models.generate_content_stream(
            model="gemini-2.0-flash-thinking-exp-01-21",
            config={'thinking_config': {'include_thoughts': True}},
            contents=[
                {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                "Résous ce problème?"
            ]
        )

        def generate():
            try:
                for chunk in response_stream:
                    for part in chunk.candidates[0].content.parts:
                        # Thought parts are routed separately so the UI can
                        # display the model's reasoning apart from the answer.
                        if part.thought:
                            yield f"data: {json.dumps({'thought': part.text})}\n\n"
                        else:
                            yield f"data: {json.dumps({'answer': part.text})}\n\n"
                    time.sleep(0.05)  # Contrôler la vitesse de streaming
            except Exception as e:
                print(f"Error during generation: {e}")
                # Serialize with json.dumps: the previous hand-built JSON
                # string broke whenever the error text contained quotes or
                # backslashes, producing an unparseable SSE payload.
                yield f"data: {json.dumps({'error': str(e)})}\n\n"

        # stream_with_context keeps the request context alive while the
        # generator runs (it was imported but never used before).
        return Response(stream_with_context(generate()),
                        mimetype='text/event-stream')
    except Exception as e:
        return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which allows arbitrary code execution — confirm this entry point is
    # only used for local development, never in production.
    app.run(debug=True)