# app.py (root)
"""Gradio app: serves a custom HTML front-end and exposes a hidden JSON API."""
from pathlib import Path

import gradio as gr

from inference import chat_completion  # your back-end helpers
from tavily_search import enhance_query_with_search
from utils import (
    extract_text_from_file,
    extract_website_content,
    history_to_messages,
    apply_search_replace_changes,
    remove_code_block,
    parse_transformers_js_output,
    format_transformers_js_output,
)
from models import AVAILABLE_MODELS, find_model
from deploy import send_to_sandbox


# ---------- backend callback -------------
def generate(payload):
    """Run one generation round for the custom front-end.

    Parameters
    ----------
    payload : dict
        Expected keys: "prompt" (str), "model_id" (str), "language" (str),
        and optionally "history" (list). NOTE(review): assumed schema —
        confirm against the front-end's fetch call.

    Returns
    -------
    dict
        {"code": <generated code str>, "history": <updated history list>},
        matching the contract described in the original comment.
    """
    prompt = payload["prompt"]
    model = find_model(payload["model_id"])
    lang = payload["language"]
    history = payload.get("history", [])
    # TODO: restore the real generation logic from the compact app.py.
    # BUG FIX: the original returned the undefined name `code` (NameError)
    # as a bare tuple; return a well-formed placeholder dict matching the
    # documented contract instead.
    code = ""  # placeholder until the real logic is restored
    return {"code": code, "history": history}


# ---------- UI ---------------------------
INDEX = Path("static/index.html").read_text(encoding="utf-8")

with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
    # Custom front-end.
    # BUG FIX: gr.HTML has no `unsafe_allow_html` kwarg (that is Streamlit's
    # st.markdown API); gr.HTML already renders raw HTML as-is.
    gr.HTML(INDEX)

    # Hidden API endpoint.
    # BUG FIX: Gradio components have no `.api(...)` method. The supported
    # pattern is an invisible input/output pair wired through an event
    # listener with `api_name=`, which the front-end calls via
    # POST /run/predict. (On Gradio >= 5, `gr.api(generate)` is an
    # alternative.)
    payload_in = gr.JSON(label="hidden", visible=False)
    result_out = gr.JSON(label="hidden", visible=False)
    run_btn = gr.Button(visible=False)
    run_btn.click(
        generate,
        inputs=payload_in,
        outputs=result_out,
        api_name="predict",
    )

if __name__ == "__main__":
    demo.launch()