# app.py
# ------------------------------------------------------------------
# AnyCoder / Shasha AI – Gradio front‑end
# ------------------------------------------------------------------
"""
A lightweight Gradio UI that lets users:
1. Pick an AI model (OpenAI / Gemini / Groq / HF etc.).
2. Provide context via prompt, file upload, or website URL.
3. Choose a target language (HTML, Python, JS, …) and optionally enable
Tavily web‑search enrichment.
4. Generate code, show a live HTML preview, and keep a session history.
The heavy lifting (provider routing, web‑search merge, code post‑processing)
lives in:
• models.py – central model registry
• hf_client.py – provider‑aware InferenceClient factory
• inference.py – chat_completion / stream_chat_completion
• utils.py – helpers (file/website extraction, history utils)
• deploy.py – sandbox renderer & HF Spaces helpers
"""
from __future__ import annotations
from typing import List, Tuple
import gradio as gr
from deploy import send_to_sandbox
from inference import chat_completion
from models import AVAILABLE_MODELS, ModelInfo, find_model
from tavily_search import enhance_query_with_search
from utils import ( # high‑level utils
apply_search_replace_changes,
extract_text_from_file,
extract_website_content,
format_transformers_js_output,
history_to_chatbot_messages,
history_to_messages,
parse_transformers_js_output,
remove_code_block,
)
# ------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------
SUPPORTED_LANGUAGES = [
"python", "c", "cpp", "markdown", "latex", "json", "html", "css",
"javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
"r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite",
"sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql",
"sql-gpSQL", "sql-sparkSQL", "sql-esper"
]
SYSTEM_PROMPTS = {
"html": (
"ONLY USE HTML, CSS AND JAVASCRIPT. Create a modern, responsive UI. "
"Return <strong>ONE</strong> HTML file wrapped in ```html ...```."
),
"transformers.js": (
"You are an expert web developer. Generate THREE separate files "
"(index.html / index.js / style.css) returned as three fenced blocks."
),
}
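# Languages without a dedicated entry fall back to a generic expert prompt in
# generation_code. A hypothetical extra entry would be a one-line change, e.g.:
#
#     SYSTEM_PROMPTS["python"] = (
#         "You are an expert Python developer. Return ONE runnable script "
#         "wrapped in ```python ...```."
#     )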
# ------------------------------------------------------------------
# Core generation callback
# ------------------------------------------------------------------
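# One chat turn per entry: (user_prompt, assistant_reply).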
History = List[Tuple[str, str]]
def generation_code(
prompt: str | None,
file_path: str | None,
website_url: str | None,
model_name: str,
enable_search: bool,
language: str,
state_history: History | None,
) -> Tuple[str, History, str, List[dict[str, str]]]:
"""Backend function wired to the ✨ Generate button."""
prompt = (prompt or "").strip()
history = state_history or []
# ------------------------------------------------------------------
# Compose system prompt + context
# ------------------------------------------------------------------
sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
messages = history_to_messages(history, sys_prompt)
# --- append file / website context --------------------------------
context_parts: list[str] = [prompt]
if file_path:
context_parts.append("[Reference file]")
context_parts.append(extract_text_from_file(file_path)[:5000])
if website_url:
website_html = extract_website_content(website_url)
if not website_html.startswith("Error"):
context_parts.append("[Website content]")
context_parts.append(website_html[:8000])
user_query = "\n\n".join(filter(None, context_parts))
user_query = enhance_query_with_search(user_query, enable_search)
messages.append({"role": "user", "content": user_query})
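    # messages == [system prompt, *prior turns, current user query]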
# ------------------------------------------------------------------
# Call model via inference.py – provider routing handled inside
# ------------------------------------------------------------------
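    # Fall back to the first registered model if the dropdown name is unknown.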
model: ModelInfo = find_model(model_name) or AVAILABLE_MODELS[0]
try:
assistant_reply = chat_completion(model.id, messages)
except Exception as exc: # pragma: no cover
err_msg = f"❌ **Generation error**\n```{exc}```"
new_history = history + [(prompt, err_msg)]
return "", new_history, "", history_to_chatbot_messages(new_history)
# ------------------------------------------------------------------
# Post‑process output
# ------------------------------------------------------------------
if language == "transformers.js":
files = parse_transformers_js_output(assistant_reply)
code_out = format_transformers_js_output(files)
preview_html = send_to_sandbox(files.get("index.html", ""))
else:
cleaned = remove_code_block(assistant_reply)
        # Iterative edits: if the model emitted search/replace patch blocks,
        # apply them to the previous (non-error) reply to rebuild full code.
        if history and not history[-1][1].startswith("❌"):
            cleaned = apply_search_replace_changes(history[-1][1], cleaned)
code_out = cleaned
preview_html = send_to_sandbox(cleaned) if language == "html" else ""
new_history = history + [(prompt, code_out)]
chat_history = history_to_chatbot_messages(new_history)
return code_out, new_history, preview_html, chat_history
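# A headless smoke test of the callback (hypothetical prompt; assumes the
# provider credentials that inference.py expects are configured):
#
#     code, hist, preview, chat = generation_code(
#         "Landing page for a bakery", None, None,
#         AVAILABLE_MODELS[0].name, False, "html", None,
#     )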
# ------------------------------------------------------------------
# Gradio UI
# ------------------------------------------------------------------
THEME = gr.themes.Soft(primary_hue="blue")
with gr.Blocks(theme=THEME, title="AnyCoder / Shasha AI") as demo:
state_history = gr.State([])
# -------------------- sidebar (inputs) ---------------------------
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("### 1 · Model")
model_dd = gr.Dropdown(
choices=[m.name for m in AVAILABLE_MODELS],
value=AVAILABLE_MODELS[0].name,
label="AI Model",
)
gr.Markdown("### 2 · Context")
with gr.Tabs():
with gr.Tab("Prompt"):
prompt_box = gr.Textbox(lines=6, placeholder="Describe what you need...")
with gr.Tab("File"):
file_box = gr.File(type="filepath")
with gr.Tab("Website"):
url_box = gr.Textbox(placeholder="https://example.com")
gr.Markdown("### 3 · Output")
lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES, value="html", label="Language")
search_chk = gr.Checkbox(label="Enable Tavily Web Search")
with gr.Row():
clear_btn = gr.Button("Clear", variant="secondary")
gen_btn = gr.Button("Generate ✨", variant="primary")
# -------------------- main panel (outputs) --------------------
with gr.Column(scale=2):
with gr.Tabs():
with gr.Tab("Code"):
code_out = gr.Code(interactive=True)
with gr.Tab("Preview"):
preview_out = gr.HTML()
with gr.Tab("History"):
chat_out = gr.Chatbot(type="messages")
# -------------------- callbacks ----------------------------------
gen_btn.click(
generation_code,
inputs=[
prompt_box,
file_box,
url_box,
model_dd,
search_chk,
lang_dd,
state_history,
],
outputs=[code_out, state_history, preview_out, chat_out],
)
clear_btn.click(
lambda: ("", None, "", [], "", "", []),
outputs=[prompt_box, file_box, url_box, state_history, code_out, preview_out, chat_out],
queue=False,
)
# ------------------------------------------------------------------
if __name__ == "__main__":
demo.queue().launch()
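    # To expose a temporary public URL while testing locally (standard Gradio
    # option, not enabled here): demo.queue().launch(share=True)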