mgbam committed on
Commit
e0b040a
·
verified ·
1 Parent(s): 517f963

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +181 -143
app.py CHANGED
@@ -1,169 +1,207 @@
1
- # app.py
2
  """
3
- Main application file for Shasha AI, an AI‑assisted code‑generation tool built
4
- with Gradio.
5
- Generates code in dozens of languages with multiple OSS / proprietary models
6
- Accepts plain prompts, reference files, or a web‑site URL to redesign
7
- Optional Tavily web‑search augmentation
8
- • Live HTML preview, import‑existing‑project, and one‑click Space deploy
9
  """
10
 
11
  from __future__ import annotations
12
 
13
- import os, time, urllib.parse, tempfile, webbrowser
14
- from typing import Optional, Dict, List, Tuple, Any
15
-
16
  import gradio as gr
17
- from huggingface_hub import HfApi
18
- from tavily import TavilyClient
19
 
20
- # ──────────────────────────── local modules ──────────────────────────────
21
- from constants import (
 
22
  HTML_SYSTEM_PROMPT, HTML_SYSTEM_PROMPT_WITH_SEARCH,
23
  TRANSFORMERS_JS_SYSTEM_PROMPT, TRANSFORMERS_JS_SYSTEM_PROMPT_WITH_SEARCH,
24
- SVELTE_SYSTEM_PROMPT, SVELTE_SYSTEM_PROMPT_WITH_SEARCH,
25
  GENERIC_SYSTEM_PROMPT, GENERIC_SYSTEM_PROMPT_WITH_SEARCH,
26
- TransformersJSFollowUpSystemPrompt, FollowUpSystemPrompt,
27
- SEARCH_START, DIVIDER, REPLACE_END,
28
- AVAILABLE_MODELS, DEMO_LIST, GRADIO_SUPPORTED_LANGUAGES,
 
29
  )
30
- from hf_client import get_inference_client, HF_TOKEN
31
- from tavily_search import enhance_query_with_search, tavily_client
 
32
  from utils import (
 
33
  history_to_messages, history_to_chatbot_messages,
34
- remove_code_block, parse_transformers_js_output,
35
- format_transformers_js_output, parse_svelte_output,
36
- format_svelte_output, apply_search_replace_changes,
37
- apply_transformers_js_search_replace_changes, get_gradio_language,
38
  )
39
- from web_scraper import extract_website_content
40
- from search_replace import apply_search_replace_changes # alias kept for clarity
41
- from deploy import send_to_sandbox, deploy_to_user_space
42
- from web_scraper import extract_text_from_file, extract_text_from_image
43
- # ─────────────────────────────────────────────────────────────────────────
44
 
45
-
46
# ==== type aliases ====
History = List[Tuple[str, str]]
Model = Dict[str, Any]

# ==== helpers ====
def get_model_details(name: str) -> Model:
    """Look up a model entry by its display name.

    Falls back to the first entry of AVAILABLE_MODELS when no name matches,
    so the caller always receives a usable model dict.
    """
    for candidate in AVAILABLE_MODELS:
        if candidate["name"] == name:
            return candidate
    return AVAILABLE_MODELS[0]
53
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
# -------------------- Gradio UI ----------------------------------------
# Stylesheet injected into the Blocks app via the `css=` argument: brands the
# logo, sets the base font, and styles the title, subtitle, page background,
# and the generate button's drop shadow.
CUSTOM_CSS = """
#brand_logo{margin-right:.5rem;border-radius:8px}
body{font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,sans-serif}
#main_title{font-size:2rem;margin:0}
#subtitle{color:#4a5568;margin-bottom:2rem}
.gradio-container{background:#f7fafc}
#gen_btn{box-shadow:0 4px 6px rgba(0,0,0,.1)}
"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
- with gr.Blocks(
66
- theme=gr.themes.Soft(primary_hue="blue"),
67
- css=CUSTOM_CSS,
68
- title="Shasha AI"
69
- ) as demo:
70
-
71
- # ────────── states ──────────
72
- history_state : gr.State = gr.State([])
73
- model_state : gr.State = gr.State(AVAILABLE_MODELS[0])
74
- provider_state: gr.State = gr.State("auto")
75
-
76
- # ────────── header with logo ──────────
77
- with gr.Row(elem_id="header"):
78
- gr.Image(value="assets/logo.png",
79
- width=48, height=48,
80
- show_label=False, container=False,
81
- elem_id="brand_logo")
82
- with gr.Column():
83
- gr.Markdown("## 🚀 Shasha AI", elem_id="main_title")
84
- gr.Markdown(
85
- "Your AI partner for generating, modifying, and understanding code.",
86
- elem_id="subtitle"
87
  )
88
 
89
- # ────────── sidebar (inputs) ──────────
90
- with gr.Sidebar():
91
- gr.Markdown("### 1 · Model")
92
- model_dd = gr.Dropdown(
93
- choices=[m["name"] for m in AVAILABLE_MODELS],
94
- value=AVAILABLE_MODELS[0]["name"],
95
- label="AI Model"
96
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
 
98
- gr.Markdown("### 2 · Context")
99
- with gr.Tabs():
100
- with gr.Tab("📝 Prompt"):
101
- prompt_in = gr.Textbox(lines=7, placeholder="Describe what you’d like…")
102
- with gr.Tab("📄 File"):
103
- file_in = gr.File(type="filepath")
104
- with gr.Tab("🌐 Website"):
105
- url_in = gr.Textbox(placeholder="https://example.com")
106
-
107
- gr.Markdown("### 3 · Output")
108
- lang_dd = gr.Dropdown(
109
- choices=GRADIO_SUPPORTED_LANGUAGES,
110
- value="html",
111
- label="Target Language"
112
- )
113
- search_chk = gr.Checkbox(label="Enable Tavily Web Search")
114
-
115
- with gr.Row():
116
- clr_btn = gr.Button("Clear Session", variant="secondary")
117
- gen_btn = gr.Button("Generate Code", variant="primary", elem_id="gen_btn")
118
-
119
- # ────────── main panel (outputs) ──────────
120
- with gr.Tabs():
121
- with gr.Tab("💻 Code"):
122
- code_out = gr.Code(language="html", lines=25, interactive=True)
123
- with gr.Tab("👁️ Live Preview"):
124
- preview_out = gr.HTML()
125
- with gr.Tab("📜 History"):
126
- chat_out = gr.Chatbot(type="messages")
127
-
128
- # ────────── callbacks ──────────
129
- def generation_code(
130
- query : Optional[str],
131
- file_path : Optional[str],
132
- website_url : Optional[str],
133
- current_model: Model,
134
- enable_search: bool,
135
- language : str,
136
- history : Optional[History],
137
- ):
138
- # (implementation identical to previous working version…)
139
- # For brevity, assume the body of generation_code remains unchanged.
140
- ...
141
-
142
- # dropdown change
143
- def _on_model_change(name): return get_model_details(name)
144
-
145
- model_dd.change(
146
- _on_model_change,
147
- inputs=model_dd,
148
- outputs=model_state
149
- )
150
 
151
- # generate button
152
- gen_btn.click(
153
- generation_code,
154
- inputs=[prompt_in, file_in, url_in, model_state,
155
- search_chk, lang_dd, history_state],
156
- outputs=[code_out, history_state, preview_out, chat_out]
157
  )
158
-
159
- # clear
160
- def _reset(): return "", None, "", [], "", ""
161
- clr_btn.click(
162
- _reset,
163
- outputs=[prompt_in, file_in, url_in,
164
- history_state, code_out, preview_out, chat_out],
165
- queue=False
166
  )
167
 
168
# Script entry point: enable the request queue, then start the Gradio server.
if __name__ == "__main__":
    demo.queue().launch()
 
 
1
  """
2
+ app.py – Gradio front‑end for “AnyCoder AI” (a.k.a. Shasha AI)
3
+
4
+ UI : single‑page, 3‑column layout
5
+ Logo : assets/logo.png (120 px wide, centred)
6
+ SDK : Gradio 5.38.2 (no `height=` arg on gr.Code)
 
7
  """
8
 
9
  from __future__ import annotations
10
 
 
 
 
11
  import gradio as gr
12
+ from typing import List, Tuple, Dict, Optional, Any
 
13
 
14
+ # ── local helpers ----------------------------------------------------------
15
+ from constants import ( # all kept in one place
16
+ SEARCH_START, DIVIDER, REPLACE_END,
17
  HTML_SYSTEM_PROMPT, HTML_SYSTEM_PROMPT_WITH_SEARCH,
18
  TRANSFORMERS_JS_SYSTEM_PROMPT, TRANSFORMERS_JS_SYSTEM_PROMPT_WITH_SEARCH,
 
19
  GENERIC_SYSTEM_PROMPT, GENERIC_SYSTEM_PROMPT_WITH_SEARCH,
20
+ SYSTEM_PROMPTS, FollowUpSystemPrompt,
21
+ TransformersJSFollowUpSystemPrompt,
22
+ AVAILABLE_MODELS, DEMO_LIST,
23
+ get_gradio_language,
24
  )
25
+
26
+ from hf_client import get_inference_client
27
+ from tavily_search import enhance_query_with_search
28
  from utils import (
29
+ extract_text_from_file, extract_website_content,
30
  history_to_messages, history_to_chatbot_messages,
31
+ remove_code_block, parse_transformers_js_output, format_transformers_js_output,
32
+ apply_search_replace_changes, apply_transformers_js_search_replace_changes,
 
 
33
  )
34
+ from deploy import send_to_sandbox
35
+ from search_replace import SEARCH_START as SR_START # just to avoid name clash
36
+ # (optional import)
 
 
37
 
38
# ── type aliases -----------------------------------------------------------
History = List[Tuple[str, str]]  # chat turns as (user_prompt, assistant_reply) pairs
ModelInfo = Dict[str, str]       # one AVAILABLE_MODELS entry; "name" and "id" keys are read below
41
+
42
# ── generation core --------------------------------------------------------
def generate_code(
    prompt: str,
    file_path: Optional[str],
    website_url: Optional[str],
    model: ModelInfo,
    language: str,
    enable_search: bool,
    history: Optional[History],
) -> Tuple[str, History, str, List[Dict[str, str]]]:
    """Run one generation turn against the selected model.

    Returns a 4-tuple of (generated code, updated history, preview HTML,
    chatbot-formatted messages) matching the Gradio outputs wired to it.
    On a provider error the error text is appended to history instead of
    raising, so the UI keeps working.
    """
    history = history or []
    prompt = prompt or ""

    # 1. choose system prompt ------------------------------------------------
    # Follow-up prompts when a conversation exists (modification request),
    # language-specific prompts for a fresh generation.
    if history:
        if language == "transformers.js":
            system_prompt = TransformersJSFollowUpSystemPrompt
        else:
            system_prompt = FollowUpSystemPrompt
    else:
        if language == "html":
            system_prompt = HTML_SYSTEM_PROMPT_WITH_SEARCH if enable_search else HTML_SYSTEM_PROMPT
        elif language == "transformers.js":
            system_prompt = TRANSFORMERS_JS_SYSTEM_PROMPT_WITH_SEARCH if enable_search else TRANSFORMERS_JS_SYSTEM_PROMPT
        else:
            system_prompt = (
                GENERIC_SYSTEM_PROMPT_WITH_SEARCH.format(language=language)
                if enable_search else GENERIC_SYSTEM_PROMPT.format(language=language)
            )

    messages = history_to_messages(history, system_prompt)

    # 2. augment prompt with file / website ---------------------------------
    # Both contexts are truncated to keep the request within the model's window.
    if file_path:
        file_txt = extract_text_from_file(file_path)[:5000]
        prompt += f"\n\n[Reference file]\n{file_txt}"

    if website_url:
        site_ctx = extract_website_content(website_url.strip())
        prompt += f"\n\n[Website]\n{site_ctx[:8000]}"

    # 3. optional web‑search enrichment --------------------------------------
    user_query = enhance_query_with_search(prompt, enable_search)
    messages.append({"role": "user", "content": user_query})

    # 4. call model -----------------------------------------------------------
    client = get_inference_client(model["id"])
    try:
        resp = client.chat.completions.create(
            model=model["id"],
            messages=messages,
            max_tokens=16_000,
            temperature=0.1,
        )
        answer = resp.choices[0].message.content
    except Exception as e:
        # Surface the provider error in the chat instead of crashing the UI.
        err = f"❌ **Error:**\n```\n{e}\n```"
        history.append((prompt, err))
        return "", history, "", history_to_chatbot_messages(history)

    # 5. post‑processing ------------------------------------------------------
    if language == "transformers.js":
        files = parse_transformers_js_output(answer)
        code = format_transformers_js_output(files)
        # .get() guards against a model reply that omits index.html entirely
        # (plain files["index.html"] would raise KeyError).
        index_html = files.get("index.html", "")
        preview = send_to_sandbox(index_html) if index_html else ""
    else:
        clean = remove_code_block(answer)
        if history and not history[-1][1].startswith("❌"):
            # Follow-up turn: the answer is SEARCH/REPLACE edits applied to
            # the previous assistant output.
            clean = apply_search_replace_changes(history[-1][1], clean)
        code = clean
        preview = send_to_sandbox(code) if language == "html" else ""

    history.append((prompt, code))
    chat_msgs = history_to_chatbot_messages(history)

    return code, history, preview, chat_msgs
121
+
122
+
123
# ── UI ---------------------------------------------------------------------
# GRADIO_SUPPORTED_LANGUAGES is used below but missing from the header import
# of `constants`; import it here so the dropdown does not raise NameError.
from constants import GRADIO_SUPPORTED_LANGUAGES

THEME = gr.themes.Base(primary_hue="indigo", font="Inter")

with gr.Blocks(theme=THEME, title="AnyCoder AI") as demo:
    state_hist = gr.State([])                    # History list
    state_model = gr.State(AVAILABLE_MODELS[0])  # currently selected model dict

    # ––– Header with logo –––
    with gr.Row():
        gr.HTML(
            '<div style="text-align:center; margin:1.2rem 0;">'
            '<img src="assets/logo.png" alt="AnyCoder logo" style="width:120px;"><br>'
            '<h1 style="margin:0.4rem 0 0; font-size:1.9rem;">AnyCoder AI</h1>'
            '<p style="color:#555;">Your AI partner for generating, modifying &amp; understanding code.</p>'
            '</div>'
        )

    with gr.Row():
        # ── Sidebar (column‑1) ───────────────────────────────────────────
        with gr.Column(scale=1):
            gr.Markdown("### 1 · Select Model")
            dd_model = gr.Dropdown(
                [m["name"] for m in AVAILABLE_MODELS],
                value=AVAILABLE_MODELS[0]["name"],
                label="AI Model",
            )

            gr.Markdown("### 2 · Provide Context")
            with gr.Tabs():
                with gr.Tab("Prompt"):
                    tb_prompt = gr.Textbox(lines=6, placeholder="Describe what you want to build…")
                with gr.Tab("File"):
                    fi_file = gr.File()
                with gr.Tab("Website"):
                    tb_url = gr.Textbox(placeholder="https://example.com")

            gr.Markdown("### 3 · Configure Output")
            dd_lang = gr.Dropdown(
                GRADIO_SUPPORTED_LANGUAGES[:-1],  # drop trailing None
                value="html",
                label="Target Language",
            )
            cb_search = gr.Checkbox(label="Enable Tavily Web Search")

            with gr.Row():
                btn_clear = gr.Button("Clear Session", variant="secondary")
                btn_gen = gr.Button("Generate Code", variant="primary")

        # ── Output / preview (column‑2) ──────────────────────────────────
        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("Code"):
                    code_out = gr.Code(language="html", lines=25, label="Generated code")
                with gr.Tab("Preview"):
                    html_prev = gr.HTML()
                with gr.Tab("History"):
                    chat_out = gr.Chatbot(type="messages", height=400)

            # ––– Quick‑start buttons –––
            gr.Markdown("#### Quick Start Examples")
            with gr.Row():
                # BUGFIX: the loop variable must NOT be named `demo` — that
                # rebinds the gr.Blocks handle, so demo.launch() at the bottom
                # of the file would be called on a DEMO_LIST dict.
                for example in DEMO_LIST[:6]:
                    gr.Button(example["title"], size="sm").click(
                        # default-arg binding avoids the late-binding-closure trap
                        lambda d=example: d["description"], outputs=tb_prompt
                    )

    # ── Callbacks -----------------------------------------------------------
    def _select_model(name: str) -> ModelInfo:
        """Map the dropdown's model name back to its AVAILABLE_MODELS entry."""
        return next((m for m in AVAILABLE_MODELS if m["name"] == name), AVAILABLE_MODELS[0])

    dd_model.change(_select_model, dd_model, state_model)
    btn_gen.click(
        generate_code,
        inputs=[tb_prompt, fi_file, tb_url,
                state_model, dd_lang, cb_search, state_hist],
        outputs=[code_out, state_hist, html_prev, chat_out],
    )
    # Reset every input, the history state, and all outputs in one shot.
    btn_clear.click(
        lambda: ("", None, "", [], [], "", ""),
        outputs=[tb_prompt, fi_file, tb_url, state_hist, chat_out, code_out, html_prev],
        queue=False,
    )
205
 
206
# Script entry point: start the Gradio server with default settings.
if __name__ == "__main__":
    demo.launch()