# app.py
# ---------------------------------------------------------------------
# AnyCoder / Shasha AI – Gradio front‑end (no external static files)
# ---------------------------------------------------------------------
"""
Interactive UI for generating / modifying / previewing code with multiple
LLM back‑ends. Relies on:
• models.py – AVAILABLE_MODELS registry + find_model()
• inference.py – chat_completion() (provider routing handled there)
• utils.py – file / website extractors, history helpers, etc.
• tavily_search.py (optional) – enhance_query_with_search()
• deploy.py – send_to_sandbox() for live HTML preview
"""
from __future__ import annotations
from pathlib import Path
from typing import List, Tuple, Dict, Any, Optional
import gradio as gr
# ---------- local helpers --------------------------------------------------
from models import AVAILABLE_MODELS, find_model, ModelInfo
from inference import chat_completion
try:
    from tavily_search import enhance_query_with_search
except ImportError:  # tavily_search is optional (see module docstring); degrade to a no-op
    def enhance_query_with_search(query: str, enabled: bool = False) -> str:
        return query
from utils import (
    extract_text_from_file,
    extract_website_content,
    history_to_messages,
    history_to_chatbot_messages,
    apply_search_replace_changes,
    remove_code_block,
    parse_transformers_js_output,
    format_transformers_js_output,
)
from deploy import send_to_sandbox
# ---------- constants ------------------------------------------------------
SYSTEM_PROMPTS = {
"html": (
"ONLY USE HTML, CSS AND JAVASCRIPT. Produce ONE complete html file "
"wrapped in ```html … ```."
),
"transformers.js": (
"Generate THREE fenced blocks: index.html, index.js, style.css "
"for a transformers.js demo."
),
}
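# Languages without a dedicated prompt above fall back to a generic
# "You are an expert {language} developer." system message (see generate()).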
SUPPORTED_LANGUAGES = [
"python", "c", "cpp", "markdown", "latex", "json", "html", "css",
"javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
"r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite",
"sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql",
"sql-gpSQL", "sql-sparkSQL", "sql-esper",
]
History = List[Tuple[str, str]] # [(prompt, code/result)]
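# e.g. [("Build a landing page", "<!DOCTYPE html>…"), ("Make the header sticky", "…")]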
# ---------- core generation callback --------------------------------------
def generate(
    prompt: str,
    file_path: str | None,
    website_url: str | None,
    model_name: str,
    language: str,
    use_search: bool,
    hist: History | None,
) -> Tuple[str, History, str, List[Dict[str, str]]]:
"""Main callback wired to the “Generate Code” button."""
hist = hist or []
user_prompt = (prompt or "").strip()
# 1 · system + previous messages
sys = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
messages = history_to_messages(hist, sys)
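    # history_to_messages() is expected to prepend the system prompt and turn the
    # (prompt, code) pairs into provider-ready chat messages.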

    # 2 · gather extra context
    ctx_parts: list[str] = [user_prompt]
    if file_path:
        ctx_parts.append("[Reference file]")
        ctx_parts.append(extract_text_from_file(file_path)[:5000])
    if website_url:
        html = extract_website_content(website_url)
        if not html.lower().startswith("error"):
            ctx_parts.append("[Website content]")
            ctx_parts.append(html[:8000])
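
    # The caps above (5 000 chars for files, 8 000 for pages) keep the amount of
    # reference material sent to the model bounded.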
user_query = "\n\n".join(filter(None, ctx_parts))
user_query = enhance_query_with_search(user_query, use_search)
messages.append({"role": "user", "content": user_query})

    # 3 · call the model
    model: ModelInfo = find_model(model_name) or AVAILABLE_MODELS[0]
    try:
        assistant = chat_completion(model.id, messages)
    except Exception as exc:
        err = f"❌ **Error**\n```{exc}```"
        hist.append((user_prompt, err))
        return "", hist, "", history_to_chatbot_messages(hist)

    # 4 · post‑process
    if language == "transformers.js":
        files = parse_transformers_js_output(assistant)
        code = format_transformers_js_output(files)
        preview = send_to_sandbox(files.get("index.html", ""))
    else:
        cleaned = remove_code_block(assistant)
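        # Follow-up turns may return search/replace edit blocks rather than a full
        # file; apply them to the previous result (unless that result was an error).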
        if hist and not hist[-1][1].startswith("❌"):
            cleaned = apply_search_replace_changes(hist[-1][1], cleaned)
        code = cleaned
        preview = send_to_sandbox(code) if language == "html" else ""
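
    # The 4-tuple returned below matches the `outputs=` list wired to gen_btn:
    # (code_out, state_hist, preview_out, chat_out).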
    # 5 · update history & chat
    hist.append((user_prompt, code))
    chat_messages = history_to_chatbot_messages(hist)
    return code, hist, preview, chat_messages
# ---------- UI (Gradio 5) --------------------------------------------------
THEME = gr.themes.Soft(primary_hue="indigo")
CSS = """
.gradio-container {max-width: 1450px !important;}
@media (min-width: 960px){
.layout {display:flex; gap:32px}
.inputs {flex:1 0 360px}
.outputs {flex:2 0 0}
}
"""
with gr.Blocks(title="AnyCoder AI", theme=THEME, css=CSS) as demo:
    state_hist: gr.State = gr.State([])

    # ---- header ----------------------------------------------------------
    gr.Markdown(
        "## 🚀 **AnyCoder AI** \n"
        "Your AI partner for generating, modifying & understanding code."
    )

    with gr.Row(elem_classes="layout"):
        # -------- left column (inputs) ------------------------------------
        with gr.Column(elem_classes="inputs"):
            gr.Markdown("### 1 · Model")
            model_dd = gr.Dropdown(
                choices=[m.name for m in AVAILABLE_MODELS],
                value=AVAILABLE_MODELS[0].name,
                label="AI Model",
            )

            gr.Markdown("### 2 · Context")
            with gr.Tabs():
                with gr.Tab("Prompt"):
                    prompt_box = gr.Textbox(lines=6, placeholder="Describe what you need…")
                with gr.Tab("File"):
                    file_box = gr.File(type="filepath")
                with gr.Tab("Website"):
                    url_box = gr.Textbox(placeholder="https://example.com")

            gr.Markdown("### 3 · Output")
            lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES, value="html", label="Target Language")
            search_chk = gr.Checkbox(label="Enable Tavily Web Search")

            with gr.Row():
                clear_btn = gr.Button("Clear Session", variant="secondary")
                gen_btn = gr.Button("Generate Code", variant="primary")

        # -------- right column (outputs) ----------------------------------
        with gr.Column(elem_classes="outputs"):
            with gr.Tabs():
                with gr.Tab("Code"):
                    code_out = gr.Code(height=500, language="html")
                with gr.Tab("Live Preview"):
                    preview_out = gr.HTML()
                with gr.Tab("History"):
                    chat_out = gr.Chatbot(type="messages")

    # ---- callbacks -------------------------------------------------------
    gen_btn.click(
        generate,
        inputs=[
            prompt_box, file_box, url_box,
            model_dd, lang_dd, search_chk, state_hist
        ],
        outputs=[code_out, state_hist, preview_out, chat_out],
    )
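
    # Reset values below line up one-to-one with the `outputs=` list:
    # prompt, file, url, language, search flag, history state, code, preview, chat.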
    clear_btn.click(
        lambda: ("", None, "", "html", False, [], "", "", []),
        outputs=[
            prompt_box, file_box, url_box,
            lang_dd, search_chk, state_hist,
            code_out, preview_out, chat_out,
        ],
        queue=False,
    )
# ---------- launch ---------------------------------------------------------
if __name__ == "__main__":
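    # queue() routes events through Gradio's queue so long-running generations
    # don't hit request timeouts.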
    demo.queue().launch()