"""
AnyCoder / Shasha AI – Gradio back‑end
• Serves the custom front‑end shipped in index.html (+ static/style.css & static/index.js).
• Exposes one JSON endpoint (`POST /run/predict`) that the JS front‑end
calls to run model inference.
"""
from pathlib import Path
from typing import List, Tuple
import gradio as gr
# ---- local helpers --------------------------------------------------------
from inference import chat_completion
from tavily_search import enhance_query_with_search
from deploy import send_to_sandbox
from models import AVAILABLE_MODELS, find_model, ModelInfo
from utils import (
extract_text_from_file,
extract_website_content,
history_to_messages,
history_to_chatbot_messages,
apply_search_replace_changes,
remove_code_block,
parse_transformers_js_output,
format_transformers_js_output,
)
# ------------------- constants ---------------------------------------------
# Per-language system prompts. Languages without an entry here fall back to a
# generic "expert {language} developer" prompt built in generate().
SYSTEM_PROMPTS = {
    "html": (
        "ONLY USE HTML, CSS AND JAVASCRIPT. Return ONE html file "
        "wrapped in ```html ...```."
    ),
    "transformers.js": (
        "Generate THREE separate files (index.html / index.js / style.css) "
        "as three fenced blocks."
    ),
}
# Chat history: list of (user_prompt, generated_code) pairs.
History = List[Tuple[str, str]]
# ------------------- core callback -----------------------------------------
def generate(
    prompt: str,
    file_path: str | None,
    website_url: str | None,
    model_id: str,
    language: str,
    enable_search: bool,
    history: History | None,
) -> Tuple[str, History]:
    """Run one inference round for the JS front-end (POST /run/predict).

    Builds the chat messages from the prompt plus optional file/website
    extracts, queries the selected model, post-processes the raw answer
    into code, and returns it together with the updated history.
    """
    history = history or []

    # Language-specific system prompt when we have one, generic otherwise.
    sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
    messages = history_to_messages(history, sys_prompt)

    # Assemble the user query: prompt first, then optional attachments
    # (each truncated to keep the context within budget).
    sections: list[str] = [prompt.strip()]
    if file_path:
        sections += ["[File]", extract_text_from_file(file_path)[:5000]]
    if website_url:
        page = extract_website_content(website_url)
        # extract_website_content signals failure via an "Error..." string.
        if not page.startswith("Error"):
            sections += ["[Website]", page[:8000]]
    query = "\n\n".join(filter(None, sections))
    query = enhance_query_with_search(query, enable_search)
    messages.append({"role": "user", "content": query})

    # Unknown model ids fall back to the first available model.
    model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
    raw_answer = chat_completion(model.id, messages)

    # Post-process: transformers.js yields three files merged into one blob;
    # everything else is a single fenced code block.
    if language == "transformers.js":
        code = format_transformers_js_output(parse_transformers_js_output(raw_answer))
    else:
        code = remove_code_block(raw_answer)
        # Follow-up turns are treated as search/replace patches against the
        # previous answer — unless that answer was an error marker.
        if history and not history[-1][1].startswith("❌"):
            code = apply_search_replace_changes(history[-1][1], code)

    history.append((prompt, code))
    return code, history
# ------------------- read custom HTML --------------------------------------
# Load the hand-written front-end once at import time.
# NOTE: the codec name must be plain ASCII "utf-8" — the previous version
# contained U+2011 (non-breaking hyphen), which is not a registered codec
# name and raises LookupError at startup.
HTML_SOURCE = Path("index.html").read_text(encoding="utf-8")
# ------------------- Gradio UI ---------------------------------------------
# The Blocks app shows only the custom front-end; the real work happens via
# the hidden /run/predict endpoint that the front-end JS calls with fetch().
with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
    # 1 visible: your custom front‑end
    gr.HTML(HTML_SOURCE) # <- sanitize=False removed
    # 2 hidden: API inputs / outputs
    # These components are never shown; they only define the signature of the
    # endpoint. Their order in `inputs` below must match generate()'s
    # positional parameters exactly.
    with gr.Group(visible=False) as api:
        prompt_in  = gr.Textbox()
        file_in    = gr.File()
        url_in     = gr.Textbox()
        model_in   = gr.Textbox()
        lang_in    = gr.Textbox()
        search_in  = gr.Checkbox()
        hist_state = gr.State([])
        code_out, hist_out = gr.Textbox(), gr.State([])
    # expose /run/predict
    # Hidden button: api_name="predict" registers the POST /run/predict route
    # without any visible trigger in the UI.
    api_btn = gr.Button(visible=False)
    api_btn.click(
        fn=generate,
        inputs=[
            prompt_in, file_in, url_in,
            model_in, lang_in, search_in, hist_state
        ],
        outputs=[code_out, hist_out],
        api_name="predict",
    )
# Launch with a request queue so concurrent front-end calls are serialized.
if __name__ == "__main__":
    demo.queue().launch()