# app.py
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
"""
AnyCoderΒ /Β ShashaΒ AI β Gradio backβend
β’ Serves the custom frontβend located in static/index.html (+Β static/style.css,
static/index.js).
β’ Exposes ONE httpΒ POST endpoint β /run/predict (Gradio api_name="predict")
that the browser JS calls to run the model and get generated code.
All heavy lifting (model registry, provider routing, webβsearch, etc.) lives in
β’ models.py, inference.py, utils.py, deploy.py β¦
"""
from pathlib import Path
from typing import List, Tuple, Dict
import gradio as gr
# -- Local helpers -------------------------------------------------------------
from inference import chat_completion
from tavily_search import enhance_query_with_search
from deploy import send_to_sandbox
from models import AVAILABLE_MODELS, find_model, ModelInfo
from utils import (
extract_text_from_file,
extract_website_content,
history_to_messages,
apply_search_replace_changes,
remove_code_block,
parse_transformers_js_output,
format_transformers_js_output,
)
# -- System prompts keyed by language ------------------------------------------
# Maps the "language" value posted by the front-end to the system prompt sent
# to the model.  Unknown languages fall back to a generic expert prompt built
# in generate() below.
SYSTEM_PROMPTS: dict[str, str] = {
    "html": (
        "ONLY USE HTML, CSS AND JAVASCRIPT. Return exactly ONE file wrapped in "
        "```html ...```."
    ),
    "transformers.js": (
        "Generate THREE separate files (index.html β’ index.js β’ style.css) each "
        "inside its own fenced block."
    ),
}
# -- Output-history data structure ----------------------------------------------
# One entry per conversation turn: (user_query, generated_code).
History = List[Tuple[str, str]]  # [(user_query, generated_code), ...]
# ------------------------------------------------------------------------------
# 1. Backend callback hit by the JS front-end
# ------------------------------------------------------------------------------
def generate(
    prompt: str,
    file_path: str | None,
    website_url: str | None,
    model_id: str,
    language: str,
    web_search: bool,
    history: History | None,
) -> dict[str, str]:
    """Run one code-generation round and return the generated code.

    This is the only public API of the back-end (wired to POST /run/predict).

    Args:
        prompt: The user's natural-language request. A null/missing prompt
            from the JS front-end is tolerated and treated as empty.
        file_path: Optional path of an uploaded file whose extracted text is
            appended as context (truncated to 5,000 characters).
        website_url: Optional URL whose extracted HTML is appended as context
            (truncated to 8,000 characters).
        model_id: Looked up via ``find_model``; unknown ids fall back to
            ``AVAILABLE_MODELS[0]``.
        language: Selects a system prompt from ``SYSTEM_PROMPTS``; any other
            value gets a generic "expert {language} developer" prompt.
        web_search: When truthy, the query is augmented with web-search
            results via ``enhance_query_with_search``.
        history: Prior ``(user_query, generated_code)`` pairs, or ``None``.

    Returns:
        ``{"code": <generated source as a single string>}``.
    """
    history = history or []
    # ---- Build system + user messages --------------------------------------
    sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
    messages = history_to_messages(history, sys_prompt)
    # Robustness: the JS client may post a null prompt; never crash on it.
    ctx: list[str] = [(prompt or "").strip()]
    if file_path:
        ctx.append("[File]")
        ctx.append(extract_text_from_file(file_path)[:5_000])
    if website_url:
        html = extract_website_content(website_url)
        # extract_website_content signals failure by returning an "Error..."
        # string rather than raising; skip the context in that case.
        if not html.startswith("Error"):
            ctx.append("[Website]")
            ctx.append(html[:8_000])
    # filter(None, ...) drops the empty-prompt entry so we never emit "\n\n".
    user_query = "\n\n".join(filter(None, ctx))
    user_query = enhance_query_with_search(user_query, web_search)
    messages.append({"role": "user", "content": user_query})
    # ---- Call model ---------------------------------------------------------
    model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
    assistant = chat_completion(model.id, messages)
    # ---- Post-process output --------------------------------------------------
    if language == "transformers.js":
        # The model returns three fenced files (index.html / index.js /
        # style.css); they are parsed and re-assembled into one payload.
        files = parse_transformers_js_output(assistant)
        code = format_transformers_js_output(files)
    else:
        clean = remove_code_block(assistant)
        # When a previous turn produced code, the new answer is applied to it
        # as search/replace edits.  NOTE(review): the "β" prefix test looks
        # like a mis-encoded failure marker (possibly "❌") — confirm against
        # the front-end before changing it; preserved byte-for-byte here.
        if history and not history[-1][1].startswith("β"):
            clean = apply_search_replace_changes(history[-1][1], clean)
        code = clean
    # (preview iframe is rendered entirely client-side from code)
    return {"code": code}
# ------------------------------------------------------------------------------
# 2. Gradio wrapper
# ------------------------------------------------------------------------------
# Load the custom UI shipped in static/ once, at import time.
INDEX_HTML = Path("static/index.html").read_text(encoding="utf-8")

with gr.Blocks(css="body{margin:0}", title="AnyCoderΒ AI") as demo:
    # 2-a  The whole front-end is injected as raw HTML.
    gr.HTML(INDEX_HTML)

    # 2-b  Hidden widgets that together act as a JSON REST API for the JS UI.
    with gr.Group(visible=False) as api_group:
        prompt_box = gr.Textbox()
        upload_box = gr.File()
        url_box = gr.Textbox()
        model_box = gr.Textbox()
        lang_box = gr.Textbox()
        search_flag = gr.Checkbox()
        # NOTE(review): this state is read as an input but never written back
        # by generate(), so the server-side conversation never actually grows
        # — confirm whether the JS client resends history itself.
        convo_state = gr.State([])
        result_json = gr.JSON(label="code")  # {"code": "..."} back to the JS
        run_btn = gr.Button(visible=False)

    run_btn.click(
        fn=generate,
        inputs=[prompt_box, upload_box, url_box,
                model_box, lang_box, search_flag, convo_state],
        outputs=[result_json],
        api_name="predict",  # exposed as POST /run/predict
    )

# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo.queue().launch()