|
|
|
|
|
|
|
|
|
""" |
|
A lightweight Gradio UI that lets users: |
|
|
|
1. Pick an AI model (OpenAI / Gemini / Groq / HF etc.). |
|
2. Provide context via prompt, file upload, or website URL. |
|
3. Choose a target language (HTML, Python, JS, …) and optionally enable |
|
Tavily web‑search enrichment. |
|
4. Generate code, show a live HTML preview, and keep a session history. |
|
|
|
The heavy lifting (provider routing, web‑search merge, code‑post‑processing) |
|
lives in: |
|
• models.py – central model registry |
|
• hf_client.py – provider‑aware InferenceClient factory |
|
• inference.py – chat_completion / stream_chat_completion |
|
• utils.py – helpers (file/website extraction, history utils) |
|
• deploy.py – sandbox renderer & HF Spaces helpers |
|
""" |
|
|
|
from __future__ import annotations |
|
|
|
from typing import Any, List, Optional, Tuple |
|
|
|
import gradio as gr |
|
|
|
from deploy import send_to_sandbox |
|
from inference import chat_completion |
|
from models import AVAILABLE_MODELS, ModelInfo, find_model |
|
from tavily_search import enhance_query_with_search |
|
from utils import ( |
|
apply_search_replace_changes, |
|
extract_text_from_file, |
|
extract_website_content, |
|
format_transformers_js_output, |
|
history_to_chatbot_messages, |
|
history_to_messages, |
|
parse_transformers_js_output, |
|
remove_code_block, |
|
) |
|
|
|
|
|
|
|
|
|
|
|
# Languages selectable in the UI "Language" dropdown. The chosen value is
# passed verbatim to the SYSTEM_PROMPTS lookup and to the post-processing
# branch in generation_code(); the many "sql-*" variants mirror CodeMirror
# dialect names — presumably for editor highlighting (TODO confirm).
SUPPORTED_LANGUAGES: list[str] = [
    "python", "c", "cpp", "markdown", "latex", "json", "html", "css",
    "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
    "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite",
    "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql",
    "sql-gpSQL", "sql-sparkSQL", "sql-esper"
]
|
|
|
|
|
# Language-specific system prompts. Languages without an entry here fall
# back to a generic "You are an expert <language> developer." prompt
# built in generation_code().
SYSTEM_PROMPTS: dict[str, str] = {
    "html": (
        "ONLY USE HTML, CSS AND JAVASCRIPT. Create a modern, responsive UI. "
        "Return <strong>ONE</strong> HTML file wrapped in ```html ...```."
    ),
    # transformers.js apps need three separate files, parsed later by
    # parse_transformers_js_output().
    "transformers.js": (
        "You are an expert web developer. Generate THREE separate files "
        "(index.html / index.js / style.css) returned as three fenced blocks."
    ),
}
|
|
|
|
|
|
|
|
|
|
|
# One session turn per tuple: (user prompt, assistant code/output).
History = List[Tuple[str, str]]
|
|
|
|
|
def generation_code(
    prompt: str | None,
    file_path: str | None,
    website_url: str | None,
    model_name: str,
    enable_search: bool,
    language: str,
    state_history: History | None,
) -> Tuple[str, History, str, List[dict[str, str]]]:
    """Backend function wired to the ✨ Generate button.

    Assembles a chat request from the prompt plus optional file/website
    context, calls the selected model, post-processes the reply for the
    chosen target language, and returns the updated UI state.

    Args:
        prompt: Free-text request from the user (may be None/empty).
        file_path: Optional path of an uploaded reference file.
        website_url: Optional URL whose content is added as context.
        model_name: Display name of the model chosen in the dropdown.
        enable_search: Whether to enrich the query via Tavily search.
        language: Target output language (see SUPPORTED_LANGUAGES).
        state_history: Prior session turns, or None on first call.

    Returns:
        Tuple of (code_out, new_history, preview_html, chatbot_messages)
        matching the Gradio outputs wiring.
    """
    prompt = (prompt or "").strip()
    history = state_history or []

    sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
    messages = history_to_messages(history, sys_prompt)

    user_query = _build_user_query(prompt, file_path, website_url)
    user_query = enhance_query_with_search(user_query, enable_search)
    messages.append({"role": "user", "content": user_query})

    # Fall back to the first registered model if the name is unknown.
    model: ModelInfo = find_model(model_name) or AVAILABLE_MODELS[0]
    try:
        assistant_reply = chat_completion(model.id, messages)
    except Exception as exc:
        # Surface provider failures in the chat history instead of crashing
        # the UI; outputs stay empty so stale code isn't shown.
        err_msg = f"❌ **Generation error**\n```{exc}```"
        new_history = history + [(prompt, err_msg)]
        return "", new_history, "", history_to_chatbot_messages(new_history)

    code_out, preview_html = _postprocess_reply(assistant_reply, language, history)

    new_history = history + [(prompt, code_out)]
    chat_history = history_to_chatbot_messages(new_history)
    return code_out, new_history, preview_html, chat_history


def _build_user_query(prompt: str, file_path: str | None, website_url: str | None) -> str:
    """Join the prompt with optional file/website context into one message."""
    context_parts: list[str] = [prompt]

    if file_path:
        context_parts.append("[Reference file]")
        # Cap file context to keep the request within model context limits.
        context_parts.append(extract_text_from_file(file_path)[:5000])

    if website_url:
        website_html = extract_website_content(website_url)
        # extract_website_content reports failure via an "Error..." string;
        # skip the context rather than feeding the error to the model.
        if not website_html.startswith("Error"):
            context_parts.append("[Website content]")
            context_parts.append(website_html[:8000])

    # filter(None, ...) drops an empty prompt so no stray blank section remains.
    return "\n\n".join(filter(None, context_parts))


def _postprocess_reply(assistant_reply: str, language: str, history: History) -> Tuple[str, str]:
    """Turn the raw model reply into (code_out, preview_html) for *language*."""
    if language == "transformers.js":
        # Three-file output: recombine for display, preview only index.html.
        files = parse_transformers_js_output(assistant_reply)
        return format_transformers_js_output(files), send_to_sandbox(files.get("index.html", ""))

    cleaned = remove_code_block(assistant_reply)
    # Follow-up turns may come back as search/replace edits against the
    # previous output; merge them unless the last turn was an error message.
    if history and not history[-1][1].startswith("❌"):
        cleaned = apply_search_replace_changes(history[-1][1], cleaned)
    preview_html = send_to_sandbox(cleaned) if language == "html" else ""
    return cleaned, preview_html
|
|
|
|
|
|
|
|
|
|
|
THEME = gr.themes.Soft(primary_hue="blue")

with gr.Blocks(theme=THEME, title="AnyCoder / Shasha AI") as demo:
    # Per-session generation history: list of (prompt, code) tuples.
    state_history = gr.State([])

    with gr.Row():
        # Left column: model choice, context inputs, and action buttons.
        with gr.Column(scale=1):
            gr.Markdown("### 1 · Model")
            model_dd = gr.Dropdown(
                choices=[m.name for m in AVAILABLE_MODELS],
                value=AVAILABLE_MODELS[0].name,
                label="AI Model",
            )

            gr.Markdown("### 2 · Context")
            with gr.Tabs():
                with gr.Tab("Prompt"):
                    prompt_box = gr.Textbox(lines=6, placeholder="Describe what you need...")
                with gr.Tab("File"):
                    # type="filepath" so generation_code receives a path string.
                    file_box = gr.File(type="filepath")
                with gr.Tab("Website"):
                    url_box = gr.Textbox(placeholder="https://example.com")

            gr.Markdown("### 3 · Output")
            lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES, value="html", label="Language")
            search_chk = gr.Checkbox(label="Enable Tavily Web Search")

            with gr.Row():
                clear_btn = gr.Button("Clear", variant="secondary")
                gen_btn = gr.Button("Generate ✨", variant="primary")

        # Right column: generated code, live HTML preview, and chat history.
        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("Code"):
                    code_out = gr.Code(interactive=True)
                with gr.Tab("Preview"):
                    preview_out = gr.HTML()
                with gr.Tab("History"):
                    chat_out = gr.Chatbot(type="messages")

    # Input order must match generation_code's parameter order; output order
    # must match its return tuple.
    gen_btn.click(
        generation_code,
        inputs=[
            prompt_box,
            file_box,
            url_box,
            model_dd,
            search_chk,
            lang_dd,
            state_history,
        ],
        outputs=[code_out, state_history, preview_out, chat_out],
    )

    # Reset every input/output component and the session state; queue=False
    # so clearing is instant and skips the request queue.
    clear_btn.click(
        lambda: ("", None, "", [], "", "", []),
        outputs=[prompt_box, file_box, url_box, state_history, code_out, preview_out, chat_out],
        queue=False,
    )
|
|
|
|
|
if __name__ == "__main__":
    # queue() enables request queuing so long generations don't block
    # concurrent users before launching the app.
    demo.queue().launch()
|
|