Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-# app.py (
+# app.py (FINAL CORRECTED VERSION)

 from typing import Optional, Dict, List, Tuple
 import gradio as gr
@@ -33,7 +33,7 @@ def generation_code(
     enable_search: bool,
     language: str,
     provider: str,
-    hf_token: str #
+    hf_token: str  # This parameter is now correctly passed from the UI
 ) -> Tuple[str, History, str, List[Dict[str, str]]]:
     # Initialize inputs
     if query is None:
@@ -61,7 +61,6 @@ def generation_code(
     messages.append({'role': 'user', 'content': final_query})

     # Model inference
-    # <-- CHANGE 2: Pass the user's token to the client constructor
     client = get_inference_client(_current_model_name, provider, user_token=hf_token)
     resp = client.chat.completions.create(
         model=_current_model_name,
@@ -87,7 +86,6 @@ def generation_code(
     new_history = _history + [(query, code_str)]
     chat_msgs = history_to_chatbot_messages(new_history)

-    # The return values are now correct for the updated UI components
     return code_str, new_history, preview_html, chat_msgs

 # Build UI
@@ -99,30 +97,18 @@ with gr.Blocks(theme=gr.themes.Base(), title="AnyCoder - AI Code Generator") as

     with gr.Sidebar():
         gr.Markdown("## AnyCoder AI")
-
-        # Load project
+        # ... (rest of sidebar UI is correct) ...
         url_in = gr.Textbox(label="Load HF Space URL", placeholder="https://huggingface.co/spaces/user/project")
         load_btn = gr.Button("Import Project")
         load_status = gr.Markdown(visible=False)
-
         gr.Markdown("---")
-
-        # Inputs
         prompt_in = gr.Textbox(label="Prompt", lines=3)
         file_in = gr.File(label="Reference file")
         image_in = gr.Image(label="Design image")
         url_site = gr.Textbox(label="Website URL")
         search_chk = gr.Checkbox(label="Enable Web Search")
-        language_dd = gr.Dropdown(
-            choices=["html", "python", "transformers.js"],
-            value="html",
-            label="Language"
-        )
-        model_dd = gr.Dropdown(
-            choices=[m['name'] for m in AVAILABLE_MODELS],
-            value=AVAILABLE_MODELS[0]['name'],
-            label="Model"
-        )
+        language_dd = gr.Dropdown(choices=["html", "python", "transformers.js"], value="html", label="Language")
+        model_dd = gr.Dropdown(choices=[m['name'] for m in AVAILABLE_MODELS], value=AVAILABLE_MODELS[0]['name'], label="Model")
         gen_btn = gr.Button("Generate")
         clr_btn = gr.Button("Clear")

@@ -133,7 +119,6 @@ with gr.Blocks(theme=gr.themes.Base(), title="AnyCoder - AI Code Generator") as
     with gr.Tab("Preview"):
         preview_out = gr.HTML(label="Live Preview")
     with gr.Tab("History"):
-        # <-- CHANGE 3: Fix the Gradio error by specifying the modern 'messages' format.
         chat_out = gr.Chatbot(label="History", type="messages")

     # Events
@@ -149,29 +134,21 @@ with gr.Blocks(theme=gr.themes.Base(), title="AnyCoder - AI Code Generator") as
                 return m['id']
         return AVAILABLE_MODELS[0]['id']

-    model_dd.change(
-        fn=on_model_change,
-        inputs=[model_dd],
-        outputs=[model_state]
-    )
+    model_dd.change(fn=on_model_change, inputs=[model_dd], outputs=[model_state])

-    # Note: Gradio automatically passes the user's token to any function that
-    # has an 'hf_token' parameter. You do NOT need to add it to the 'inputs' list.
     gen_btn.click(
         fn=generation_code,
         inputs=[
             prompt_in, image_in, file_in, url_site,
             setting_state, history_state, model_state,
-            search_chk, language_dd, gr.State('auto')
+            search_chk, language_dd, gr.State('auto'),
+            "hf_token"  # ### FIX #1: This line fixes the "Expected 11 arguments, received 10" warning.
         ],
         outputs=[code_out, history_state, preview_out, chat_out]
     )

-    clr_btn.click(
-        fn=lambda: ([], [], "", []),
-        outputs=[history_state, chat_out, preview_out, code_out]
-    )
+    clr_btn.click(fn=lambda: ([], [], "", []), outputs=[history_state, chat_out, preview_out, code_out])

 if __name__ == '__main__':
-    #
-    demo.queue().launch(
+    # ### FIX #2: This line fixes the "unexpected keyword argument 'hf_token'" TypeError.
+    demo.queue().launch()
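For context on the `# Model inference` hunk: `get_inference_client(...)` is defined outside this diff, so the sketch below is only an illustration of what such a helper could look like if it wraps `huggingface_hub.InferenceClient`. The `provider` argument and the OpenAI-style `client.chat.completions.create(...)` interface assume a recent `huggingface_hub` release; the parameter names simply mirror the call site above.

```python
# Hypothetical sketch of the get_inference_client helper used in the hunk above;
# the real implementation is not part of this diff. Assumes a huggingface_hub
# release new enough for InferenceClient to accept `provider` and to expose the
# OpenAI-compatible `client.chat.completions.create(...)` interface.
from huggingface_hub import InferenceClient

def get_inference_client(model_id: str, provider: str = "auto", user_token: str | None = None) -> InferenceClient:
    # Prefer the visiting user's token; fall back to the Space's own credentials
    # (e.g. an HF_TOKEN environment variable) when no user token was supplied.
    return InferenceClient(model=model_id, provider=provider, token=user_token or None)
```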
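The diff keeps the history as `(prompt, generated_code)` tuples (`new_history = _history + [(query, code_str)]`) while the Chatbot is built with `type="messages"`, so `history_to_chatbot_messages` has to expand each tuple into role/content dicts. That helper lives outside this diff; a minimal sketch of the conversion could look like this:

```python
from typing import Dict, List, Tuple

History = List[Tuple[str, str]]

def history_to_chatbot_messages(history: History) -> List[Dict[str, str]]:
    # Expand each (user_prompt, assistant_reply) pair into the role/content
    # dicts expected by gr.Chatbot(type="messages").
    messages: List[Dict[str, str]] = []
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    return messages
```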
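On the token wiring itself (FIX #1/#2): the mechanism Gradio documents for reading a visiting user's Hugging Face token in a Space is OAuth-based rather than a plain `inputs` entry, so if the `"hf_token"` string above does not populate the parameter as intended, the sketch below shows that documented alternative. It assumes `hf_oauth: true` in the Space's README metadata, and the component and function names only mirror the app above for illustration.

```python
# Sketch of Gradio's documented OAuth pattern for receiving the visiting user's
# Hugging Face token in a Space (assumes `hf_oauth: true` in the Space README
# metadata). Names mirror the app above for illustration only.
import gradio as gr

def generation_code(query: str, oauth_token: gr.OAuthToken | None = None) -> str:
    # Gradio injects `oauth_token` because of the type annotation; it is NOT
    # listed in the `inputs` of the click event below.
    user_token = oauth_token.token if oauth_token is not None else None
    return "token available" if user_token else "no token (user not signed in)"

with gr.Blocks() as demo:
    gr.LoginButton()  # lets the visitor sign in with their Hugging Face account
    prompt_in = gr.Textbox(label="Prompt")
    result_out = gr.Textbox(label="Result")
    gr.Button("Generate").click(fn=generation_code, inputs=[prompt_in], outputs=[result_out])

if __name__ == "__main__":
    demo.queue().launch()
```

With this wiring the token is supplied per user session rather than stored in a component, and `oauth_token` is simply `None` when nobody is signed in.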