mgbam committed (verified) · Commit d8cd951 · 1 Parent(s): 265111f

Update app.py

Files changed (1):
  1. app.py +83 -166
app.py CHANGED
@@ -1,210 +1,127 @@
  # app.py
- # ------------------------------------------------------------------
- # AnyCoder / Shasha AI – Gradio front-end
- # ------------------------------------------------------------------
- """
- A lightweight Gradio UI that lets users:
-
- 1. Pick an AI model (OpenAI / Gemini / Groq / HF etc.).
- 2. Provide context via prompt, file upload, or website URL.
- 3. Choose a target language (HTML, Python, JS, …) and optionally enable
-    Tavily web-search enrichment.
- 4. Generate code, show a live HTML preview, and keep a session history.
-
- The heavy lifting (provider routing, web-search merge, code-post-processing)
- lives in:
-   • models.py    – central model registry
-   • hf_client.py – provider-aware InferenceClient factory
-   • inference.py – chat_completion / stream_chat_completion
-   • utils.py     – helpers (file/website extraction, history utils)
-   • deploy.py    – sandbox renderer & HF Spaces helpers
  """

- from __future__ import annotations

- from typing import Any, List, Optional, Tuple

  import gradio as gr

- from deploy import send_to_sandbox
- from inference import chat_completion
- from models import AVAILABLE_MODELS, ModelInfo, find_model
  from tavily_search import enhance_query_with_search
- from utils import (  # high-level utils
-     apply_search_replace_changes,
      extract_text_from_file,
      extract_website_content,
-     format_transformers_js_output,
-     history_to_chatbot_messages,
      history_to_messages,
-     parse_transformers_js_output,
      remove_code_block,
  )

- # ------------------------------------------------------------------
- # Configuration
- # ------------------------------------------------------------------
-
- SUPPORTED_LANGUAGES = [
-     "python", "c", "cpp", "markdown", "latex", "json", "html", "css",
-     "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
-     "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite",
-     "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql",
-     "sql-gpSQL", "sql-sparkSQL", "sql-esper"
- ]
-
-
  SYSTEM_PROMPTS = {
      "html": (
-         "ONLY USE HTML, CSS AND JAVASCRIPT. Create a modern, responsive UI. "
-         "Return <strong>ONE</strong> HTML file wrapped in ```html ...```."
      ),
      "transformers.js": (
-         "You are an expert web developer. Generate THREE separate files "
-         "(index.html / index.js / style.css) returned as three fenced blocks."
      ),
  }
-
-
- # ------------------------------------------------------------------
- # Core generation callback
- # ------------------------------------------------------------------
  History = List[Tuple[str, str]]

-
- def generation_code(
-     prompt: str | None,
      file_path: str | None,
      website_url: str | None,
-     model_name: str,
-     enable_search: bool,
      language: str,
-     state_history: History | None,
- ) -> Tuple[str, History, str, List[dict[str, str]]]:
-     """Backend function wired to the ✨ Generate button."""
-     prompt = (prompt or "").strip()
-     history = state_history or []

-     # ------------------------------------------------------------------
-     # Compose system prompt + context
-     # ------------------------------------------------------------------
-     sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
-     messages = history_to_messages(history, sys_prompt)

-     # --- append file / website context --------------------------------
-     context_parts: list[str] = [prompt]

      if file_path:
-         context_parts.append("[Reference file]")
-         context_parts.append(extract_text_from_file(file_path)[:5000])
-
      if website_url:
-         website_html = extract_website_content(website_url)
-         if not website_html.startswith("Error"):
-             context_parts.append("[Website content]")
-             context_parts.append(website_html[:8000])

-     user_query = "\n\n".join(filter(None, context_parts))
      user_query = enhance_query_with_search(user_query, enable_search)
      messages.append({"role": "user", "content": user_query})

-     # ------------------------------------------------------------------
-     # Call model via inference.py provider routing handled inside
-     # ------------------------------------------------------------------
-     model: ModelInfo = find_model(model_name) or AVAILABLE_MODELS[0]
-     try:
-         assistant_reply = chat_completion(model.id, messages)
-     except Exception as exc:  # pragma: no cover
-         err_msg = f"❌ **Generation error**\n```{exc}```"
-         new_history = history + [(prompt, err_msg)]
-         return "", new_history, "", history_to_chatbot_messages(new_history)
-
-     # ------------------------------------------------------------------
-     # Post-process output
-     # ------------------------------------------------------------------
      if language == "transformers.js":
-         files = parse_transformers_js_output(assistant_reply)
-         code_out = format_transformers_js_output(files)
-         preview_html = send_to_sandbox(files.get("index.html", ""))
      else:
-         cleaned = remove_code_block(assistant_reply)
-         # search/replace patching for iterative edits
          if history and not history[-1][1].startswith("❌"):
              cleaned = apply_search_replace_changes(history[-1][1], cleaned)
-         code_out = cleaned
-         preview_html = send_to_sandbox(cleaned) if language == "html" else ""
-
-     new_history = history + [(prompt, code_out)]
-     chat_history = history_to_chatbot_messages(new_history)
-     return code_out, new_history, preview_html, chat_history
-
-
- # ------------------------------------------------------------------
- # Gradio UI
- # ------------------------------------------------------------------
- THEME = gr.themes.Soft(primary_hue="blue")
-
- with gr.Blocks(theme=THEME, title="AnyCoder / Shasha AI") as demo:
-     state_history = gr.State([])
-
-     # -------------------- sidebar (inputs) ---------------------------
-     with gr.Row():
-         with gr.Column(scale=1):
-             gr.Markdown("### 1 · Model")
-             model_dd = gr.Dropdown(
-                 choices=[m.name for m in AVAILABLE_MODELS],
-                 value=AVAILABLE_MODELS[0].name,
-                 label="AI Model",
-             )
-
-             gr.Markdown("### 2 · Context")
-             with gr.Tabs():
-                 with gr.Tab("Prompt"):
-                     prompt_box = gr.Textbox(lines=6, placeholder="Describe what you need...")
-                 with gr.Tab("File"):
-                     file_box = gr.File(type="filepath")
-                 with gr.Tab("Website"):
-                     url_box = gr.Textbox(placeholder="https://example.com")
-
-             gr.Markdown("### 3 · Output")
-             lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES, value="html", label="Language")
-             search_chk = gr.Checkbox(label="Enable Tavily Web Search")
-
-             with gr.Row():
-                 clear_btn = gr.Button("Clear", variant="secondary")
-                 gen_btn = gr.Button("Generate ✨", variant="primary")
-
-         # -------------------- main panel (outputs) --------------------
-         with gr.Column(scale=2):
-             with gr.Tabs():
-                 with gr.Tab("Code"):
-                     code_out = gr.Code(interactive=True)
-                 with gr.Tab("Preview"):
-                     preview_out = gr.HTML()
-                 with gr.Tab("History"):
-                     chat_out = gr.Chatbot(type="messages")
-
-     # -------------------- callbacks ----------------------------------
-     gen_btn.click(
-         generation_code,
          inputs=[
-             prompt_box,
-             file_box,
-             url_box,
-             model_dd,
-             search_chk,
-             lang_dd,
-             state_history,
          ],
-         outputs=[code_out, state_history, preview_out, chat_out],
-     )
-
-     clear_btn.click(
-         lambda: ("", None, "", [], "", "", []),
-         outputs=[prompt_box, file_box, url_box, state_history, code_out, preview_out, chat_out],
-         queue=False,
      )

- # ------------------------------------------------------------------
  if __name__ == "__main__":
      demo.queue().launch()

  # app.py
  """
+ AnyCoder / Shasha AI – Gradio back-end
+
+ Shows the custom front-end shipped in index.html (plus static/style.css & static/index.js).
+ • Exposes one JSON endpoint (`POST /run/predict`) that the JS front-end
+   calls to run model inference.
+ • Keeps all existing helpers (hf_client, inference, utils, deploy …).
+ """
+
+ from pathlib import Path
+ from typing import List, Tuple, Dict, Any
+
  import gradio as gr

+ # ---- local helpers (unchanged) --------------------------------------------
+ from inference import chat_completion
  from tavily_search import enhance_query_with_search
+ from deploy import send_to_sandbox
+ from models import AVAILABLE_MODELS, find_model, ModelInfo
+ from utils import (
      extract_text_from_file,
      extract_website_content,
      history_to_messages,
+     history_to_chatbot_messages,
+     apply_search_replace_changes,
      remove_code_block,
+     parse_transformers_js_output,
+     format_transformers_js_output,
  )

+ # ------------------- constants ---------------------------------------------
  SYSTEM_PROMPTS = {
      "html": (
+         "ONLY USE HTML, CSS AND JAVASCRIPT. Return ONE html file "
+         "wrapped in ```html ...```."
      ),
      "transformers.js": (
+         "Generate THREE separate files (index.html / index.js / style.css) "
+         "as three fenced blocks."
      ),
  }
  History = List[Tuple[str, str]]

+ # ------------------- core callback -----------------------------------------
+ def generate(
+     prompt: str,
      file_path: str | None,
      website_url: str | None,
+     model_id: str,
      language: str,
+     enable_search: bool,
+     history: History | None,
+ ) -> Tuple[str, History]:
+     """Called by the JS front-end via fetch('/run/predict')."""
+     history = history or []
+
+     # --- build system + messages ------------------------------------------
+     system_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
+     messages = history_to_messages(history, system_prompt)
+
+     ctx_parts: list[str] = [prompt.strip()]
+
      if file_path:
+         ctx_parts.append("[File]")
+         ctx_parts.append(extract_text_from_file(file_path)[:5000])
      if website_url:
+         html = extract_website_content(website_url)
+         if not html.startswith("Error"):
+             ctx_parts.append("[Website]")
+             ctx_parts.append(html[:8000])
+
+     user_query = "\n\n".join(filter(None, ctx_parts))
      user_query = enhance_query_with_search(user_query, enable_search)
      messages.append({"role": "user", "content": user_query})

+     # --- run model ---------------------------------------------------------
+     model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
+     answer = chat_completion(model.id, messages)
+
+     # --- post-process ------------------------------------------------------
      if language == "transformers.js":
+         files = parse_transformers_js_output(answer)
+         code = format_transformers_js_output(files)
      else:
+         cleaned = remove_code_block(answer)
          if history and not history[-1][1].startswith("❌"):
              cleaned = apply_search_replace_changes(history[-1][1], cleaned)
+         code = cleaned
+
+     history.append((prompt, code))
+     return code, history
+
+ # ------------------- read custom HTML --------------------------------------
+ HTML_SOURCE = Path("index.html").read_text(encoding="utf-8")
+
+ # ------------------- Gradio UI ---------------------------------------------
+ with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
+     # 1 visible: your own UI
+     gr.HTML(HTML_SOURCE, sanitize=False)
+
+     # 2 hidden: API inputs / outputs
+     with gr.Group(visible=False) as api:
+         prompt_in = gr.Textbox()
+         file_in = gr.File()
+         url_in = gr.Textbox()
+         model_in = gr.Textbox()
+         lang_in = gr.Textbox()
+         search_in = gr.Checkbox()
+         hist_state = gr.State([])
+
+         code_out, hist_out = gr.Textbox(), gr.State([])
+
+     # expose /run/predict
+     api_btn = gr.Button(visible=False)
+     api_btn.click(
+         fn=generate,
          inputs=[
+             prompt_in, file_in, url_in,
+             model_in, lang_in, search_in, hist_state
          ],
+         outputs=[code_out, hist_out],
+         api_name="predict",
      )

  if __name__ == "__main__":
      demo.queue().launch()
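
The new back-end hides its Gradio widgets and only exposes the `api_name="predict"` event, which the docstring says the JS front-end reaches via `POST /run/predict`. Below is a minimal sketch of that call from Python, assuming a locally launched Space on the default port and a Gradio release that still serves the legacy `/run/<api_name>` JSON route; the prompt, model name, and empty history are placeholder values, and the exact payload envelope and state handling vary across Gradio versions.

```python
# Sketch only: exercises the hidden "predict" endpoint wired up in app.py.
# Assumes the legacy Gradio /run/<api_name> JSON route and a local launch;
# newer Gradio releases use a different route and response envelope.
import requests

payload = {
    # One entry per component in inputs=[...]:
    # prompt_in, file_in, url_in, model_in, lang_in, search_in, hist_state
    "data": [
        "Build a landing page for a bakery",  # prompt (placeholder)
        None,                                 # no file uploaded
        "",                                   # no website URL
        "some-model-name",                    # placeholder model identifier
        "html",                               # target language
        False,                                # Tavily search disabled
        [],                                   # empty history (gr.State)
    ]
}

resp = requests.post("http://127.0.0.1:7860/run/predict", json=payload, timeout=300)
resp.raise_for_status()
result = resp.json().get("data", [])  # mirrors outputs=[code_out, hist_out]
print(result[0][:200] if result else resp.json())
```

On Gradio versions where this route is unavailable, the official `gradio_client` package can call the same endpoint by name (`api_name="/predict"`) without hand-building the payload.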