# builder / app.py — Hugging Face Space page chrome captured with the file:
# "mgbam's picture / Update app.py / 8d36c79 verified / raw / history blame /
# 4.3 kB" — commented out so the module parses as valid Python.
"""
AnyCoder / Shasha AI – Gradio back‑end
• Serves the custom front‑end shipped in index.html (+ static/style.css & static/index.js).
• Exposes one JSON endpoint (`POST /run/predict`) that the JS front‑end
calls to run model inference.
"""
from pathlib import Path
from typing import List, Tuple
import gradio as gr
# ---- local helpers --------------------------------------------------------
from inference import chat_completion
from tavily_search import enhance_query_with_search
from deploy import send_to_sandbox
from models import AVAILABLE_MODELS, find_model, ModelInfo
from utils import (
extract_text_from_file,
extract_website_content,
history_to_messages,
history_to_chatbot_messages,
apply_search_replace_changes,
remove_code_block,
parse_transformers_js_output,
format_transformers_js_output,
)
# ------------------- constants ---------------------------------------------
# Language-specific system prompts keyed by the `language` value sent from the
# JS front-end; generate() falls back to a generic "expert developer" prompt
# for any language not listed here.
SYSTEM_PROMPTS: dict[str, str] = {
    "html": (
        "ONLY USE HTML, CSS AND JAVASCRIPT. Return ONE html file "
        "wrapped in ```html ...```."
    ),
    "transformers.js": (
        "Generate THREE separate files (index.html / index.js / style.css) "
        "as three fenced blocks."
    ),
}
# Chat history: list of (user_prompt, generated_code) pairs.
History = List[Tuple[str, str]]
# ------------------- core callback -----------------------------------------
def generate(
    prompt: str,
    file_path: str | None,
    website_url: str | None,
    model_id: str,
    language: str,
    enable_search: bool,
    history: History | None,
) -> Tuple[str, History]:
    """Run one inference round-trip for the JS front-end (POST /run/predict).

    Assembles the chat context from the prompt plus optional file / website
    extracts, invokes the selected model, post-processes the answer into
    code, and returns it together with the updated chat history.
    """
    chat_log: History = history if history else []

    # Pick a language-specific system prompt, with a generic fallback.
    sys_msg = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
    messages = history_to_messages(chat_log, sys_msg)

    # Build the user query from the prompt plus optional attachments
    # (file contents and scraped website HTML are size-capped).
    segments = [prompt.strip()]
    if file_path:
        segments += ["[File]", extract_text_from_file(file_path)[:5000]]
    if website_url:
        page = extract_website_content(website_url)
        if not page.startswith("Error"):
            segments += ["[Website]", page[:8000]]
    query = "\n\n".join(s for s in segments if s)
    query = enhance_query_with_search(query, enable_search)
    messages.append({"role": "user", "content": query})

    # Resolve the requested model, falling back to the first available one.
    chosen: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
    raw_answer = chat_completion(chosen.id, messages)

    # Post-process: three-file output for transformers.js, one block otherwise.
    if language == "transformers.js":
        code = format_transformers_js_output(parse_transformers_js_output(raw_answer))
    else:
        code = remove_code_block(raw_answer)
        # Apply incremental search/replace edits against the previous
        # answer, unless the last turn ended in an error marker.
        if chat_log and not chat_log[-1][1].startswith("❌"):
            code = apply_search_replace_changes(chat_log[-1][1], code)

    chat_log.append((prompt, code))
    return code, chat_log
# ------------------- read custom HTML --------------------------------------
# Load the custom front-end served by the Blocks app below.
# BUG FIX: the original passed encoding="utf‑8" with a U+2011 non-breaking
# hyphen — not a valid codec name, so the read failed at startup. Use the
# plain ASCII "utf-8" codec name instead.
HTML_SOURCE = Path("index.html").read_text(encoding="utf-8")
# ------------------- Gradio UI ---------------------------------------------
with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
    # Visible layer: the hand-written front-end from index.html.
    gr.HTML(HTML_SOURCE)

    # Hidden layer: bare components that define the JSON API surface the
    # JS front-end talks to via POST /run/predict.
    with gr.Group(visible=False) as hidden_api:
        in_prompt = gr.Textbox()
        in_file = gr.File()
        in_url = gr.Textbox()
        in_model = gr.Textbox()
        in_lang = gr.Textbox()
        in_search = gr.Checkbox()
        state_history = gr.State([])
        out_code = gr.Textbox()
        out_history = gr.State([])
        # Invisible trigger wired to generate(); api_name publishes the
        # endpoint as /run/predict.
        trigger = gr.Button(visible=False)
        trigger.click(
            fn=generate,
            inputs=[
                in_prompt, in_file, in_url,
                in_model, in_lang, in_search, state_history,
            ],
            outputs=[out_code, out_history],
            api_name="predict",
        )

if __name__ == "__main__":
    demo.queue().launch()