# app.py
# ────────────────────────────────────────────────────────────────
"""
AnyCoder / Shasha AI – lightweight Gradio front-end

• Pick an AI model from models.py ➜ AVAILABLE_MODELS
• Provide context (prompt / file / website)
• Choose a target language from 25+ options
• Optional Tavily web-search enrichment
• Generate code & live-preview HTML
"""
from __future__ import annotations

from pathlib import Path
from typing import List, Tuple, Dict, Any, Optional

import gradio as gr

# ── local helpers ───────────────────────────────────────────────
from models import AVAILABLE_MODELS, find_model, ModelInfo
from inference import chat_completion
from tavily_search import enhance_query_with_search
from utils import (
    extract_text_from_file,
    extract_website_content,
    history_to_messages,
    history_to_chatbot_messages,
    apply_search_replace_changes,
    remove_code_block,
    parse_transformers_js_output,
    format_transformers_js_output,
)
from deploy import send_to_sandbox

# ── constants ───────────────────────────────────────────────────
SUPPORTED_LANGUAGES = [
    "python", "c", "cpp", "markdown", "latex", "json", "html", "css",
    "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
    "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite",
    "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql",
    "sql-gpSQL", "sql-sparkSQL", "sql-esper",
]

# Hard caps on how much extracted context is forwarded to the model,
# to keep the request within typical context-window budgets.
FILE_CONTEXT_LIMIT = 5000
WEBSITE_CONTEXT_LIMIT = 8000

SYSTEM_PROMPTS = {
    "html": (
        "ONLY USE HTML, CSS AND JAVASCRIPT. Produce ONE complete HTML file "
        "wrapped in ```html ...```."
    ),
    "transformers.js": (
        "Generate THREE fenced blocks (index.html / index.js / style.css) "
        "for a transformers.js web‑app."
    ),
}

# Chat history as (user_prompt, assistant_code) pairs.
History = List[Tuple[str, str]]


# ── core callback ───────────────────────────────────────────────
def generate_code(
    prompt: str,
    file_path: str | None,
    website_url: str | None,
    model_name: str,
    enable_search: bool,
    language: str,
    hist: History | None,
) -> Tuple[str, History, str, List[Dict[str, str]]]:
    """Back-end for the 'Generate Code' button.

    Builds a chat-completion request from the prompt plus optional
    file/website context, runs the selected model, post-processes the
    raw output for the target language, and returns::

        (code, updated_history, html_preview, chatbot_messages)

    On model failure the error is appended to the history instead of
    raising, so the UI always receives a well-formed tuple.
    """
    hist = hist or []
    prompt = (prompt or "").strip()

    # 1 · build messages — language-specific system prompt when we have
    # one, generic "expert <language>" otherwise.
    sys_prompt = SYSTEM_PROMPTS.get(
        language, f"You are an expert {language} developer."
    )
    messages = history_to_messages(hist, sys_prompt)

    ctx_parts: list[str] = [prompt]
    if file_path:
        ctx_parts += ["[File]", extract_text_from_file(file_path)[:FILE_CONTEXT_LIMIT]]
    if website_url:
        html = extract_website_content(website_url)
        # extract_website_content signals failure via an "Error…" string;
        # skip the context rather than feeding the error to the model.
        if not html.startswith("Error"):
            ctx_parts += ["[Website]", html[:WEBSITE_CONTEXT_LIMIT]]

    # Drop empty fragments (e.g. a blank prompt) so the query has no
    # stray leading/doubled blank paragraphs.
    query = "\n\n".join(part for part in ctx_parts if part)
    user_msg = enhance_query_with_search(query, enable_search)
    messages.append({"role": "user", "content": user_msg})

    # 2 · run model (provider selection handled in inference.chat_completion)
    model: ModelInfo = find_model(model_name) or AVAILABLE_MODELS[0]
    try:
        raw_out = chat_completion(model.id, messages)
    except Exception as exc:  # pragma: no cover
        err = f"❌ **Error**\n```{exc}```"
        hist.append((prompt, err))
        return "", hist, "", history_to_chatbot_messages(hist)

    # 3 · post-process
    if language == "transformers.js":
        # Multi-file output: recombine for display, preview index.html only.
        files = parse_transformers_js_output(raw_out)
        code = format_transformers_js_output(files)
        preview = send_to_sandbox(files.get("index.html", ""))
    else:
        cleaned = remove_code_block(raw_out)
        # Follow-up turns may emit search/replace patches against the
        # previous answer — apply them unless the last turn was an error.
        if hist and not hist[-1][1].startswith("❌"):
            cleaned = apply_search_replace_changes(hist[-1][1], cleaned)
        code = cleaned
        preview = send_to_sandbox(cleaned) if language == "html" else ""

    hist.append((prompt, code))
    chat_view = history_to_chatbot_messages(hist)
    return code, hist, preview, chat_view


# ── UI ──────────────────────────────────────────────────────────
THEME = gr.themes.Soft(primary_hue="indigo")
custom_css = """
body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; }
#main_title { text-align:center;font-size:2.4rem;margin-top:1rem }
#subtitle  { text-align:center;color:#5a6475;margin-bottom:2rem }
"""

with gr.Blocks(title="AnyCoder AI", theme=THEME, css=custom_css) as demo:
    # NOTE: gr.State is not generically subscriptable at runtime; keep a
    # plain assignment (holds a History list).
    state_hist = gr.State([])

    gr.Markdown("## 🚀 AnyCoder AI", elem_id="main_title")
    gr.Markdown(
        "Your AI partner for generating, modifying & understanding code.",
        elem_id="subtitle",
    )

    with gr.Row():
        # ────────── inputs (sidebar) ──────────
        with gr.Column(scale=1):
            gr.Markdown("#### 1 · Select Model")
            model_dd = gr.Dropdown(
                choices=[m.name for m in AVAILABLE_MODELS],
                value=AVAILABLE_MODELS[0].name,
                label="AI Model",
            )

            gr.Markdown("#### 2 · Provide Context")
            with gr.Tabs():
                with gr.Tab("Prompt"):
                    prompt_box = gr.Textbox(
                        lines=6, placeholder="Describe what you want…"
                    )
                with gr.Tab("File"):
                    file_box = gr.File(type="filepath")
                with gr.Tab("Website"):
                    url_box = gr.Textbox(placeholder="https://example.com")

            gr.Markdown("#### 3 · Configure Output")
            lang_dd = gr.Dropdown(
                SUPPORTED_LANGUAGES, value="html", label="Target Language"
            )
            search_ck = gr.Checkbox(label="Enable Tavily Web Search")

            with gr.Row():
                clear_btn = gr.Button("Clear Session", variant="secondary")
                gen_btn = gr.Button("Generate Code", variant="primary")

        # ────────── outputs (main panel) ─────
        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("Code"):
                    code_out = gr.Code(interactive=True)
                with gr.Tab("Live Preview"):
                    preview_out = gr.HTML()
                with gr.Tab("History"):
                    chat_out = gr.Chatbot(type="messages")

    # ── wiring ─────────────────────────────────────────────────
    gen_btn.click(
        generate_code,
        inputs=[prompt_box, file_box, url_box, model_dd, search_ck, lang_dd, state_hist],
        outputs=[code_out, state_hist, preview_out, chat_out],
    )
    # Reset values are positional: they must line up with `outputs`.
    # code_out (gr.Code) needs "" and chat_out (messages Chatbot) needs [] —
    # the previous order handed the list to code_out and "" to chat_out.
    clear_btn.click(
        lambda: ("", None, "", "html", False, [], "", "", []),
        outputs=[
            prompt_box, file_box, url_box, lang_dd, search_ck,
            state_hist, code_out, preview_out, chat_out,
        ],
        queue=False,
    )


if __name__ == "__main__":
    demo.queue().launch()