# app.py
# ────────────────────────────────────────────────────────────────
"""
AnyCoder / Shasha AI – single‑file Gradio interface
• Pick a model (registry in models.py)
• Give context (prompt, file upload, or website URL)
• Choose from 25+ target languages
• Optional Tavily web‑search enrichment
• Generate code, preview HTML, see conversation history
"""
from __future__ import annotations
from typing import List, Tuple, Dict, Any
import gradio as gr
# ── local modules ───────────────────────────────────────────────
from models import AVAILABLE_MODELS, find_model, ModelInfo
from inference import chat_completion
from tavily_search import enhance_query_with_search
from utils import (
extract_text_from_file,
extract_website_content,
history_to_messages,
history_to_chatbot_messages,
apply_search_replace_changes,
remove_code_block,
parse_transformers_js_output,
format_transformers_js_output,
)
from deploy import send_to_sandbox
# ── constants ───────────────────────────────────────────────────
SUPPORTED_LANGUAGES = [
"python", "c", "cpp", "markdown", "latex", "json", "html", "css",
"javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
"r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite",
"sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql",
"sql-gpSQL", "sql-sparkSQL", "sql-esper",
]
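# Populates the "Target Language" dropdown in the UI below.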
SYSTEM_PROMPTS = {
"html": (
"ONLY USE HTML, CSS AND JAVASCRIPT. "
"Return exactly one complete HTML page wrapped in ```html ...```."
),
"transformers.js": (
"Generate THREE fenced blocks: "
"`index.html`, `index.js`, and `style.css` for a transformers.js web‑app."
),
}
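# Languages without an entry here fall back to a generic
# "You are an expert {language} developer." system prompt (see generate_code).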
History = List[Tuple[str, str]]
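# A History is a list of (user_prompt, assistant_reply) pairs; utils.history_to_messages
# converts it to chat-completion messages, history_to_chatbot_messages to the Chatbot format.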
# ── back‑end callback ───────────────────────────────────────────
def generate_code(
prompt: str,
file_path: str | None,
website_url: str | None,
model_name: str,
enable_search: bool,
language: str,
hist: History | None,
) -> Tuple[str, History, str, List[Dict[str, str]]]:
"""Runs on **Generate Code** click."""
hist = hist or []
prompt = (prompt or "").strip()
# 1 build message list
sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
msgs = history_to_messages(hist, sys_prompt)
ctx: List[str] = [prompt] if prompt else []
if file_path:
ctx += ["[File]", extract_text_from_file(file_path)[:5000]]
if website_url:
site_html = extract_website_content(website_url)
if not site_html.startswith("Error"):
ctx += ["[Website]", site_html[:8000]]
user_query = enhance_query_with_search("\n\n".join(ctx), enable_search)
msgs.append({"role": "user", "content": user_query})
# 2 call model through inference.py
model: ModelInfo = find_model(model_name) or AVAILABLE_MODELS[0]
try:
raw = chat_completion(model.id, msgs)
except Exception as exc: # pragma: no cover
err = f"❌ **Error**\n```{exc}```"
hist.append((prompt, err))
return "", hist, "", history_to_chatbot_messages(hist)
# 3 post‑process response
if language == "transformers.js":
files = parse_transformers_js_output(raw)
code = format_transformers_js_output(files)
preview = send_to_sandbox(files.get("index.html", ""))
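        # The preview shows the generated index.html from the three-file transformers.js bundle.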
else:
cleaned = remove_code_block(raw)
if hist and not hist[-1][1].startswith("❌"):
cleaned = apply_search_replace_changes(hist[-1][1], cleaned)
code = cleaned
preview = send_to_sandbox(cleaned) if language == "html" else ""
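        # SEARCH/REPLACE edits from follow-up turns are merged into the previous reply;
        # only HTML targets are rendered in the sandboxed live preview.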
hist.append((prompt, code))
chat_display = history_to_chatbot_messages(hist)
return code, hist, preview, chat_display
# ── Gradio UI ───────────────────────────────────────────────────
THEME = gr.themes.Soft(primary_hue="indigo")
CSS = """
body {font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Arial,sans-serif}
#title {text-align:center;font-size:2.3rem;margin-top:1rem}
#subtitle {text-align:center;color:#55606d;margin-bottom:2rem}
"""
with gr.Blocks(theme=THEME, css=CSS, title="AnyCoder AI") as demo:
    state_hist = gr.State([])  # shared conversation History carried between turns
gr.Markdown("## 🚀 AnyCoder AI", elem_id="title")
gr.Markdown("Your AI partner for generating, modifying & understanding code.", elem_id="subtitle")
with gr.Row():
# ── sidebar (inputs)
with gr.Column(scale=1):
gr.Markdown("#### 1 · Model")
model_dd = gr.Dropdown(
choices=[m.name for m in AVAILABLE_MODELS],
value=AVAILABLE_MODELS[0].name,
label="AI Model",
)
gr.Markdown("#### 2 · Context")
with gr.Tabs():
with gr.Tab("Prompt"):
prompt_box = gr.Textbox(lines=6, placeholder="Describe what you want to build…")
with gr.Tab("File"):
file_box = gr.File(type="filepath")
with gr.Tab("Website"):
url_box = gr.Textbox(placeholder="https://example.com")
gr.Markdown("#### 3 · Output")
lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES, value="html", label="Target Language")
search_ck = gr.Checkbox(label="Enable Tavily Web Search")
with gr.Row():
clear_btn = gr.Button("Clear Session", variant="secondary")
gen_btn = gr.Button("Generate Code", variant="primary")
# ── main panel (outputs)
with gr.Column(scale=2):
with gr.Tabs():
with gr.Tab("Code"):
code_out = gr.Code(interactive=True, label=None)
with gr.Tab("Live Preview"):
preview_out = gr.HTML()
with gr.Tab("History"):
chat_out = gr.Chatbot(type="messages")
# wiring
gen_btn.click(
generate_code,
inputs=[prompt_box, file_box, url_box, model_dd, search_ck, lang_dd, state_hist],
outputs=[code_out, state_hist, preview_out, chat_out],
)
clear_btn.click(
lambda: ("", None, "", "html", False, [], [], "", ""),
outputs=[prompt_box, file_box, url_box, lang_dd, search_ck,
state_hist, code_out, preview_out, chat_out],
queue=False,
)
if __name__ == "__main__":
demo.queue().launch()