# app.py
# ──────────────────────────────────────────────────────────────────────────────
"""
AnyCoder / Shasha AI – Gradio back-end

• Serves the custom front-end located in static/index.html
  (+ static/style.css, static/index.js).
• Exposes ONE http POST endpoint → /run/predict (Gradio api_name="predict")
  that the browser JS calls to run the model and get generated code.

All heavy lifting (model registry, provider routing, web-search, etc.) lives in
• models.py, inference.py, utils.py, deploy.py …
"""

from pathlib import Path
from typing import List, Tuple, Dict  # noqa: F401 — kept for compatibility; aliases below use builtin generics

import gradio as gr

# ── Local helpers ────────────────────────────────────────────────────────────
from inference import chat_completion
from tavily_search import enhance_query_with_search
from deploy import send_to_sandbox
from models import AVAILABLE_MODELS, find_model, ModelInfo
from utils import (
    extract_text_from_file,
    extract_website_content,
    history_to_messages,
    apply_search_replace_changes,
    remove_code_block,
    parse_transformers_js_output,
    format_transformers_js_output,
)

# ── System prompts keyed by language ─────────────────────────────────────────
SYSTEM_PROMPTS = {
    "html": (
        "ONLY USE HTML, CSS AND JAVASCRIPT. Return exactly ONE file wrapped in "
        "```html ...```."
    ),
    "transformers.js": (
        "Generate THREE separate files (index.html • index.js • style.css) each "
        "inside its own fenced block."
    ),
}

# ── Output-history data structure ────────────────────────────────────────────
# [(user_query, generated_code), …] — spelled with builtin generics (PEP 585)
# to match the `list[str]` / `str | None` syntax already used in this file.
History = list[tuple[str, str]]


# ══════════════════════════════════════════════════════════════════════════════
# 1. Backend callback hit by the JS front-end
# ══════════════════════════════════════════════════════════════════════════════
def generate(
    prompt: str,
    file_path: str | None,
    website_url: str | None,
    model_id: str,
    language: str,
    web_search: bool,
    history: History | None,
) -> dict[str, str]:
    """Run one generation request and return the produced code.

    This is the only public API; the browser JS POSTs to /run/predict.

    Args:
        prompt:      The user's natural-language request.
        file_path:   Optional uploaded-file path; its extracted text is
                     appended as context, truncated to 5 000 characters.
        website_url: Optional URL; its scraped content is appended as
                     context (truncated to 8 000 characters) unless the
                     extractor reported an error.
        model_id:    Model identifier resolved via ``find_model``; falls
                     back to ``AVAILABLE_MODELS[0]`` when unknown.
        language:    Target language — selects the system prompt and the
                     post-processing applied to the model output.
        web_search:  When True, the query is enriched with web-search hits.
        history:     Prior ``(user_query, generated_code)`` pairs, or None.

    Returns:
        ``{"code": <generated source as a single string>}``
    """
    history = history or []

    # ---- Build system + user messages --------------------------------------
    sys_prompt = SYSTEM_PROMPTS.get(
        language, f"You are an expert {language} developer."
    )
    messages = history_to_messages(history, sys_prompt)

    ctx: list[str] = [prompt.strip()]
    if file_path:
        ctx.append("[File]")
        ctx.append(extract_text_from_file(file_path)[:5_000])
    if website_url:
        html = extract_website_content(website_url)
        # extract_website_content signals failure via a string starting
        # with "Error" rather than raising — skip that context entirely.
        if not html.startswith("Error"):
            ctx.append("[Website]")
            ctx.append(html[:8_000])

    user_query = "\n\n".join(filter(None, ctx))
    user_query = enhance_query_with_search(user_query, web_search)
    messages.append({"role": "user", "content": user_query})

    # ---- Call model ---------------------------------------------------------
    model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
    assistant = chat_completion(model.id, messages)

    # ---- Post-process output -------------------------------------------------
    if language == "transformers.js":
        files = parse_transformers_js_output(assistant)
        code = format_transformers_js_output(files)
    else:
        clean = remove_code_block(assistant)
        # Follow-up turns arrive as search/replace patches against the
        # previous result — apply them unless the last turn was an error.
        if history and not history[-1][1].startswith("❌"):
            clean = apply_search_replace_changes(history[-1][1], clean)
        code = clean

    # (preview iframe is rendered entirely client-side from `code`)
    return {"code": code}
# ══════════════════════════════════════════════════════════════════════════════
# 2. Gradio wrapper
# ══════════════════════════════════════════════════════════════════════════════

# The hand-written front-end is served verbatim; Gradio only supplies the
# HTTP plumbing underneath it.
INDEX_HTML = Path("static/index.html").read_text(encoding="utf-8")

with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
    # 2-a  Render the custom UI shipped in static/.
    gr.HTML(INDEX_HTML)

    # 2-b  Hidden widgets that together act as a JSON REST endpoint for the
    #      browser JS. Nothing in this group is ever shown to the user.
    with gr.Group(visible=False) as api_group:
        prompt_box = gr.Textbox()
        upload_box = gr.File()
        url_box = gr.Textbox()
        model_box = gr.Textbox()
        lang_box = gr.Textbox()
        search_flag = gr.Checkbox()
        convo_state = gr.State([])           # conversation persists server-side
        result_json = gr.JSON(label="code")  # {"code": "..."} back to the JS
        run_btn = gr.Button(visible=False)

        run_btn.click(
            fn=generate,
            inputs=[
                prompt_box,
                upload_box,
                url_box,
                model_box,
                lang_box,
                search_flag,
                convo_state,
            ],
            outputs=[result_json],
            api_name="predict",  # <─ POST /run/predict
        )

# ------------------------------------------------------------------------------
if __name__ == "__main__":
    demo.queue().launch()