from flask import Flask, render_template_string, jsonify
from apscheduler.schedulers.background import BackgroundScheduler
import subprocess
import sys  # needed: capture_stream mirrors child stderr to our stderr
import threading
import pytz
from datetime import datetime
from io import TextIOWrapper

app = Flask(__name__)

execution_logs = []
MAX_LOG_ENTRIES = 20  # Now tracks executions, not individual lines
log_lock = threading.Lock()      # Thread safety for execution_logs
process_lock = threading.Lock()  # Prevent concurrent script execution


def run_cli_script():
    """Run cli.py once and record its stdout/stderr as a single log entry.

    Skips silently if another run is already in progress (non-blocking
    acquire of ``process_lock``).  The log entry is appended to
    ``execution_logs`` whether the run succeeds, times out, or raises,
    so failed executions are visible in the /logs view too.
    """
    if not process_lock.acquire(blocking=False):
        # Another instance is already running
        return
    try:
        # Timestamp the run in IST for display purposes.
        ist = pytz.timezone("Asia/Kolkata")
        timestamp = datetime.now(pytz.utc).astimezone(ist).strftime(
            "%Y-%m-%d %H:%M:%S IST"
        )
        log_entry = {'time': timestamp, 'output': '', 'error': ''}

        try:
            with subprocess.Popen(
                ["python", "cli.py"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                bufsize=1,
                text=True,
            ) as process:

                def capture_stream(stream, kind):
                    """Drain one pipe into log_entry while echoing it live."""
                    for line in stream:
                        if kind == 'output':
                            log_entry['output'] += line
                            print(line, end='')
                        else:
                            log_entry['error'] += line
                            print(line, end='', file=sys.stderr)

                stdout_thread = threading.Thread(
                    target=capture_stream, args=(process.stdout, 'output')
                )
                stderr_thread = threading.Thread(
                    target=capture_stream, args=(process.stderr, 'error')
                )
                stdout_thread.start()
                stderr_thread.start()

                try:
                    # Wait for process to complete or timeout (e.g., 1 hour).
                    process.wait(timeout=3600)
                except subprocess.TimeoutExpired:
                    # Terminate *inside* the with-block: Popen.__exit__ waits
                    # for the child, so it must be killed before we leave.
                    process.terminate()
                    log_entry['error'] += "\nProcess timed out after 1 hour."

                # Reader threads finish once the pipes hit EOF.
                stdout_thread.join()
                stderr_thread.join()
        except Exception as e:
            # Record unexpected failures in the entry instead of losing them.
            log_entry['error'] += f"\nUnexpected error: {str(e)}"

        # Always record the run — success, timeout, or error alike.
        with log_lock:
            execution_logs.append(log_entry)
            if len(execution_logs) > MAX_LOG_ENTRIES:
                execution_logs.pop(0)
    finally:
        process_lock.release()


def start_initial_run():
    """Kick off one run immediately in a background daemon thread."""
    threading.Thread(target=run_cli_script, daemon=True).start()


scheduler = BackgroundScheduler(daemon=True)
scheduler.add_job(
    run_cli_script,
    'interval',
    hours=3,
    id='main_job',
    timezone=pytz.utc,  # Scheduler uses UTC internally
)
scheduler.start()
start_initial_run()


@app.route('/')
def home():
    """Render the dashboard with the next scheduled run time (IST)."""
    job = scheduler.get_job('main_job')
    next_run = (
        job.next_run_time.astimezone(pytz.timezone("Asia/Kolkata")).strftime(
            '%Y-%m-%d %H:%M:%S IST'
        )
        if job
        else 'N/A'
    )
    return render_template_string('''
''', next_run=next_run)


@app.route('/logs')
def logs():
    """Return captured execution logs as JSON, newest first."""
    with log_lock:
        return jsonify({'logs': execution_logs[::-1]})  # Newest logs first


@app.route('/force-run')
def force_run():
    """Manually trigger a run unless one is already in progress.

    Note: Thread.start() returns None, so it cannot be used as a
    success flag; we check the process lock instead.
    """
    if process_lock.locked():
        return "Script is already running", 429
    threading.Thread(target=run_cli_script, daemon=True).start()
    return "Script started manually", 200


@app.route('/run-check')
def run_check():
    """Health check: restart the scheduler if it has stopped."""
    if not scheduler.running:
        scheduler.start()
    return "Scheduler is running", 200


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)