# app.py
"""
ShashaCode Builder – AI code-generation playground.

• Hugging Face Spaces + Gradio front-end
• Supports prompts, file upload, website scraping, and optional web search
• Returns generated code, shows a live HTML preview, and can deploy to a user Space
"""

# ───────────────────────────────────────── Imports
import gradio as gr
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Any

from constants import (  # ← all constants live here
    HTML_SYSTEM_PROMPT,
    TRANSFORMERS_JS_SYSTEM_PROMPT,
    SYSTEM_PROMPTS,
    AVAILABLE_MODELS,
    DEMO_LIST,
    GRADIO_SUPPORTED_LANGUAGES,  # ← new import
    SEARCH_START,
    DIVIDER,
    REPLACE_END,
)
from hf_client import get_inference_client
from tavily_search import enhance_query_with_search
from utils import (  # helpers split into utils.py
    history_to_messages,
    history_to_chatbot_messages,
    remove_code_block,
    parse_transformers_js_output,
    format_transformers_js_output,
    parse_svelte_output,
    format_svelte_output,
    apply_search_replace_changes,
    apply_transformers_js_search_replace_changes,
    extract_text_from_file,
    extract_website_content,
    get_gradio_language,
)
from deploy import send_to_sandbox

# ───────────────────────────────────────── Type Aliases
History = List[Tuple[str, str]]
ModelInfo = Dict[str, Any]


# ───────────────────────────────────────── Core Function
def generate_code(
    query: str,
    file_path: Optional[str],
    website_url: Optional[str],
    model: ModelInfo,
    enable_search: bool,
    language: str,
    history: Optional[History],
) -> Tuple[str, History, str, List[Dict[str, str]]]:
    """Main inference pipeline: build prompt → call model → post-process."""
    query = query or ""
    history = history or []

    # 1. Pick the system prompt for the requested output language
    if language == "html":
        system = HTML_SYSTEM_PROMPT
    elif language == "transformers.js":
        system = TRANSFORMERS_JS_SYSTEM_PROMPT
    else:
        system = SYSTEM_PROMPTS.get(language, HTML_SYSTEM_PROMPT)

    # 2. Build the message list: prior turns + current query + optional file/website context
    messages = history_to_messages(history, system)

    ctx_parts = [query.strip()]
    if file_path:
        ctx_parts += ["[File]", extract_text_from_file(file_path)[:5000]]
    if website_url:
        html = extract_website_content(website_url)
        if not html.startswith("Error"):
            ctx_parts += ["[Website]", html[:8000]]

    user_query = "\n\n".join(ctx_parts)
    user_query = enhance_query_with_search(user_query, enable_search)
    messages.append({"role": "user", "content": user_query})

    # 3. Call the model (errors are surfaced to the UI instead of crashing the request)
    client = get_inference_client(model["id"])
    try:
        resp = client.chat.completions.create(
            model=model["id"],
            messages=messages,
            max_tokens=16000,
            temperature=0.15,
        )
        answer = resp.choices[0].message.content
    except Exception as exc:
        err = f"❌ Error: {exc}"
        history.append((query, err))
        return err, history, "", history_to_chatbot_messages(history)

    # 4. Post-process the model output into displayable code + preview
    if language == "transformers.js":
        files = parse_transformers_js_output(answer)
        code = format_transformers_js_output(files)
        preview = send_to_sandbox(files.get("index.html", ""))
    else:
        clean = remove_code_block(answer)
        # Follow-up turns may come back as SEARCH/REPLACE edits; apply them to the previous code
        if history and not history[-1][1].startswith("❌"):
            clean = apply_search_replace_changes(history[-1][1], clean)
        code = clean
        preview = send_to_sandbox(code) if language == "html" else ""

    history.append((query, code))
    chat_msgs = history_to_chatbot_messages(history)
    return code, history, preview, chat_msgs


# ───────────────────────────────────────── UI
LOGO_PATH = "assets/logo.png"  # ensure this file exists

CUSTOM_CSS = """
body {font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;}
#logo {max-height:64px;margin:auto;}
"""

with gr.Blocks(css=CUSTOM_CSS, title="ShashaCode Builder") as demo:
    state_history = gr.State([])
    state_model = gr.State(AVAILABLE_MODELS[0])

    # Header
    with gr.Row():
        gr.Image(LOGO_PATH, elem_id="logo", show_label=False, height=64)
        gr.Markdown("## **ShashaCode Builder**\nYour AI partner for generating, modifying & understanding code.")

    with gr.Row():
        # Sidebar (inputs)
        with gr.Column(scale=1, min_width=300):
            # Model
            dd_model = gr.Dropdown(
                label="AI Model",
                choices=[m["name"] for m in AVAILABLE_MODELS],
                value=AVAILABLE_MODELS[0]["name"],
            )

            # Prompt / File / Website tabs
            with gr.Tabs():
                with gr.Tab("Prompt"):
                    tb_prompt = gr.Textbox(label="Describe what you'd like to build…", lines=6)
                with gr.Tab("File"):
                    inp_file = gr.File(label="Reference file", type="filepath")
                with gr.Tab("Website"):
                    tb_url = gr.Textbox(label="URL to redesign")

            # Output config
            dd_lang = gr.Dropdown(
                label="Target language",
                choices=[l for l in GRADIO_SUPPORTED_LANGUAGES if l],  # ← fixed list
                value="html",
            )
            chk_search = gr.Checkbox(label="Enable Tavily Web Search")

            # Buttons
            btn_generate = gr.Button("Generate Code", variant="primary")
            btn_clear = gr.Button("Clear Session", variant="secondary")

        # Main panel (outputs)
        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("Code"):
                    out_code = gr.Code(language="html", show_label=False)
                with gr.Tab("Preview"):
                    out_prev = gr.HTML()
                with gr.Tab("History"):
                    out_hist = gr.Chatbot(type="messages")

    # ─── Callbacks ─────────────────────────────────────────────
    def _model_from_name(name):
        return next((m for m in AVAILABLE_MODELS if m["name"] == name), AVAILABLE_MODELS[0])

    dd_model.change(_model_from_name, inputs=dd_model, outputs=state_model)

    # Keep the code viewer's syntax highlighting in sync with the chosen target language
    dd_lang.change(
        lambda lang: gr.update(language=get_gradio_language(lang)),
        inputs=dd_lang,
        outputs=out_code,
    )

    btn_generate.click(
        fn=generate_code,
        inputs=[tb_prompt, inp_file, tb_url, state_model, chk_search, dd_lang, state_history],
        outputs=[out_code, state_history, out_prev, out_hist],
    )

    btn_clear.click(
        lambda: ("", None, "", [], "", "", []),
        outputs=[tb_prompt, inp_file, tb_url, state_history, out_code, out_prev, out_hist],
    )

if __name__ == "__main__":
    demo.queue().launch()
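
# ───────────────────────────────────────── Local-run notes
# A minimal sketch of running this Space locally. The environment-variable names
# below are assumptions (they depend on how hf_client.py and tavily_search.py read
# their credentials); HF_TOKEN and TAVILY_API_KEY are only the usual defaults.
#
#   export HF_TOKEN=hf_xxx          # token consumed by get_inference_client()
#   export TAVILY_API_KEY=tvly_xxx  # only needed when "Enable Tavily Web Search" is on
#   python app.py                   # Gradio serves on http://127.0.0.1:7860 by default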