# app.py (REVISED)
from typing import Optional, Dict, List, Tuple
import gradio as gr
from constants import HTML_SYSTEM_PROMPT, AVAILABLE_MODELS, DEMO_LIST
from hf_client import get_inference_client
from tavily_search import enhance_query_with_search
from utils import (
    extract_text_from_file,
    extract_website_content,
    apply_search_replace_changes,
    history_to_messages,
    history_to_chatbot_messages,
    remove_code_block,
    parse_transformers_js_output,
    format_transformers_js_output,
)
from deploy import send_to_sandbox, load_project_from_url

# Type aliases
History = List[Tuple[str, str]]
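# e.g. [("make a landing page", "<html>...</html>")] -- one (prompt, generated code)
# pair per turn, matching how new_history is built in generation_code below.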


# Core generation function
def generation_code(
    query: Optional[str],
    image: Optional[gr.Image],
    file: Optional[str],
    website_url: Optional[str],
    _setting: Dict[str, str],
    _history: Optional[History],
    _current_model_name: str,
    enable_search: bool,
    language: str,
    provider: str,
    # <-- CHANGE 1: Accept the user's HF token. Gradio auto-injects a value into
    # any parameter annotated with gr.OAuthToken (None when the user is not
    # logged in), so this parameter must NOT be listed in the event's `inputs`.
    hf_token: Optional[gr.OAuthToken] = None,
) -> Tuple[str, History, str, List[Dict[str, str]]]:
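    """Run one generation turn: build the prompt, call the model, post-process.

    Returns (code_str, new_history, preview_html, chatbot_messages), in the
    order expected by gen_btn.click's `outputs` below.
    """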
    # Initialize inputs
    if query is None:
        query = ''
    if _history is None:
        _history = []

    # System prompt and history
    system_prompt = _setting.get('system', HTML_SYSTEM_PROMPT)
    messages = history_to_messages(_history, system_prompt)
    # File input
    if file:
        text = extract_text_from_file(file)
        query += f"\n\n[File content]\n{text[:5000]}"

    # Website input
    if website_url:
        text = extract_website_content(website_url)
        if not text.startswith('Error'):
            query += f"\n\n[Website content]\n{text[:8000]}"

    # Web search enhancement
    final_query = enhance_query_with_search(query, enable_search)
    messages.append({'role': 'user', 'content': final_query})
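    # At this point `messages` has the usual chat shape, e.g.:
    #   [{'role': 'system', 'content': system_prompt},
    #    {'role': 'user', 'content': '...earlier prompt...'},
    #    {'role': 'assistant', 'content': '...earlier code...'},
    #    {'role': 'user', 'content': final_query}]
    # (assuming history_to_messages emits alternating user/assistant entries)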
    # Model inference
    # <-- CHANGE 2: Pass the user's token to the client constructor so the
    # request runs under the logged-in user's account.
    client = get_inference_client(
        _current_model_name,
        provider,
        user_token=hf_token.token if hf_token else None,
    )
    resp = client.chat.completions.create(
        model=_current_model_name,
        messages=messages,
        max_tokens=10000
    )
    content = resp.choices[0].message.content
    # Post-processing
    has_existing = bool(_history)
    if language == 'transformers.js':
        files = parse_transformers_js_output(content)
        code_str = format_transformers_js_output(files)
        preview_html = send_to_sandbox(files.get('index.html', ''))
    else:
        clean = remove_code_block(content)
        if has_existing:
            # Follow-up turns are treated as search/replace edits on the last output
            clean = apply_search_replace_changes(_history[-1][1], clean)
        code_str = clean
        preview_html = send_to_sandbox(clean) if language == 'html' else ''

    # Update history
    new_history = _history + [(query, code_str)]
    chat_msgs = history_to_chatbot_messages(new_history)
    # The return values match the updated UI components wired up below
    return code_str, new_history, preview_html, chat_msgs


# Build UI
with gr.Blocks(theme=gr.themes.Base(), title="AnyCoder - AI Code Generator") as demo:
    # State
    history_state = gr.State([])
    setting_state = gr.State({'system': HTML_SYSTEM_PROMPT})
    model_state = gr.State(AVAILABLE_MODELS[0]['id'])

    with gr.Sidebar():
        gr.Markdown("## AnyCoder AI")
        # Sign-in button: required for Gradio to populate the gr.OAuthToken
        # parameter of generation_code (on Spaces, also enable OAuth with
        # `hf_oauth: true` in the README metadata).
        gr.LoginButton()
        # Load project
        url_in = gr.Textbox(label="Load HF Space URL", placeholder="https://huggingface.co/spaces/user/project")
        load_btn = gr.Button("Import Project")
        load_status = gr.Markdown(visible=False)
        gr.Markdown("---")
        # Inputs
        prompt_in = gr.Textbox(label="Prompt", lines=3)
        file_in = gr.File(label="Reference file")
        image_in = gr.Image(label="Design image")
        url_site = gr.Textbox(label="Website URL")
        search_chk = gr.Checkbox(label="Enable Web Search")
        language_dd = gr.Dropdown(
            choices=["html", "python", "transformers.js"],
            value="html",
            label="Language"
        )
        model_dd = gr.Dropdown(
            choices=[m['name'] for m in AVAILABLE_MODELS],
            value=AVAILABLE_MODELS[0]['name'],
            label="Model"
        )
        gen_btn = gr.Button("Generate")
        clr_btn = gr.Button("Clear")
    with gr.Column():
        with gr.Tabs():
            with gr.Tab("Code"):
                code_out = gr.Code(label="Generated Code")
            with gr.Tab("Preview"):
                preview_out = gr.HTML(label="Live Preview")
            with gr.Tab("History"):
                # <-- CHANGE 3: Fix the Gradio error by using the modern
                # 'messages' format instead of the deprecated tuple format.
                chat_out = gr.Chatbot(label="History", type="messages")
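                # With type="messages" the component value is a list of dicts,
                # e.g. [{"role": "user", "content": "prompt"},
                #       {"role": "assistant", "content": "```html ...```"}],
                # which is the shape history_to_chatbot_messages is assumed
                # to return.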

    # Events
    load_btn.click(
        fn=load_project_from_url,
        inputs=[url_in],
        outputs=[load_status, code_out, preview_out, url_in, history_state, chat_out]
    )
    def on_model_change(name: str) -> str:
        # Map the dropdown's display name back to the model id held in model_state
        for m in AVAILABLE_MODELS:
            if m['name'] == name:
                return m['id']
        return AVAILABLE_MODELS[0]['id']

    model_dd.change(
        fn=on_model_change,
        inputs=[model_dd],
        outputs=[model_state]
    )
    # Note: Gradio injects the OAuth token into any parameter annotated with
    # gr.OAuthToken (generation_code's `hf_token` above). It must NOT appear
    # in the `inputs` list; Gradio supplies it automatically on each call.
    gen_btn.click(
        fn=generation_code,
        inputs=[
            prompt_in, image_in, file_in, url_site,
            setting_state, history_state, model_state,
            search_chk, language_dd, gr.State('auto')
        ],
        outputs=[code_out, history_state, preview_out, chat_out]
    )
    clr_btn.click(
        # code_out is a gr.Code component, so clear it with "" rather than []
        fn=lambda: ([], [], "", ""),
        outputs=[history_state, chat_out, preview_out, code_out]
    )


if __name__ == '__main__':
    # <-- CHANGE 4: launch() has no `hf_token` argument. Per-user tokens come
    # from the OAuth flow above (gr.LoginButton plus the gr.OAuthToken
    # parameter); requiring login is a Space setting, not a launch() flag.
    demo.queue().launch()
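# A minimal offline smoke test of generation_code, bypassing the UI. This is a
# sketch under assumptions: it passes hf_token=None (no OAuth outside Gradio),
# so get_inference_client must fall back to a server-side credential.
#
#   code, hist, preview, msgs = generation_code(
#       "a simple landing page", None, None, None,
#       {'system': HTML_SYSTEM_PROMPT}, [], AVAILABLE_MODELS[0]['id'],
#       False, 'html', 'auto', None,
#   )
#   assert isinstance(code, str) and len(hist) == 1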