mgbam committed on
Commit
49d4630
·
verified ·
1 Parent(s): e0b040a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -97
app.py CHANGED
@@ -1,67 +1,61 @@
1
  """
2
- app.py – Gradio front‑end for β€œAnyCoderΒ AI” (a.k.a. Shasha AI)
3
 
4
- UI : single‑page, 3‑column layout
5
- Logo : assets/logo.png (120β€―px wide, centred)
6
- SDK : Gradio 5.38.2 (no `height=` arg on gr.Code)
7
  """
8
 
9
  from __future__ import annotations
10
-
11
  import gradio as gr
12
- from typing import List, Tuple, Dict, Optional, Any
13
 
14
- # ── local helpers ----------------------------------------------------------
15
- from constants import ( # all kept in one place
16
- SEARCH_START, DIVIDER, REPLACE_END,
17
  HTML_SYSTEM_PROMPT, HTML_SYSTEM_PROMPT_WITH_SEARCH,
18
  TRANSFORMERS_JS_SYSTEM_PROMPT, TRANSFORMERS_JS_SYSTEM_PROMPT_WITH_SEARCH,
19
  GENERIC_SYSTEM_PROMPT, GENERIC_SYSTEM_PROMPT_WITH_SEARCH,
20
- SYSTEM_PROMPTS, FollowUpSystemPrompt,
21
- TransformersJSFollowUpSystemPrompt,
22
- AVAILABLE_MODELS, DEMO_LIST,
23
- get_gradio_language,
24
  )
25
-
26
- from hf_client import get_inference_client
27
- from tavily_search import enhance_query_with_search
28
- from utils import (
29
  extract_text_from_file, extract_website_content,
30
  history_to_messages, history_to_chatbot_messages,
31
- remove_code_block, parse_transformers_js_output, format_transformers_js_output,
32
- apply_search_replace_changes, apply_transformers_js_search_replace_changes,
33
  )
34
- from deploy import send_to_sandbox
35
- from search_replace import SEARCH_START as SR_START # just to avoid name clash
36
- # (optional import)
 
 
37
 
38
- # ── type aliases -----------------------------------------------------------
39
  History = List[Tuple[str, str]]
40
- ModelInfo = Dict[str, str]
41
 
42
- # ── generation core --------------------------------------------------------
43
  def generate_code(
44
  prompt: str,
45
  file_path: Optional[str],
46
  website_url: Optional[str],
47
- model: ModelInfo,
48
  language: str,
49
  enable_search: bool,
50
  history: Optional[History],
51
- ) -> Tuple[str, History, str, List[Dict[str, str]]]:
52
-
53
  history = history or []
54
- prompt = prompt or ""
55
 
56
- # 1. choose system prompt ------------------------------------------------
57
  if history:
58
- # modification request
59
- if language == "transformers.js":
60
- system_prompt = TransformersJSFollowUpSystemPrompt
61
- else:
62
- system_prompt = FollowUpSystemPrompt
63
  else:
64
- # fresh generation
65
  if language == "html":
66
  system_prompt = HTML_SYSTEM_PROMPT_WITH_SEARCH if enable_search else HTML_SYSTEM_PROMPT
67
  elif language == "transformers.js":
@@ -74,38 +68,33 @@ def generate_code(
74
 
75
  messages = history_to_messages(history, system_prompt)
76
 
77
- # 2. augment prompt with file / website ---------------------------------
78
  if file_path:
79
- file_txt = extract_text_from_file(file_path)[:5000]
80
- prompt += f"\n\n[Reference file]\n{file_txt}"
81
-
82
  if website_url:
83
- site_ctx = extract_website_content(website_url.strip())
84
- prompt += f"\n\n[Website]\n{site_ctx[:8000]}"
85
 
86
- # 3. optional web‑search enrichment --------------------------------------
87
- user_query = enhance_query_with_search(prompt, enable_search)
88
- messages.append({"role": "user", "content": user_query})
89
 
90
- # 4. call model -----------------------------------------------------------
91
  client = get_inference_client(model["id"])
92
  try:
93
  resp = client.chat.completions.create(
94
  model=model["id"],
95
  messages=messages,
96
- max_tokens=16_000,
97
  temperature=0.1,
98
  )
99
  answer = resp.choices[0].message.content
100
  except Exception as e:
101
  err = f"❌ **Error:**\n```\n{e}\n```"
102
  history.append((prompt, err))
103
- return "", history, "", history_to_chatbot_messages(history)
104
 
105
- # 5. post‑processing ------------------------------------------------------
106
  if language == "transformers.js":
107
- files = parse_transformers_js_output(answer)
108
- code = format_transformers_js_output(files)
109
  preview = send_to_sandbox(files["index.html"]) if files["index.html"] else ""
110
  else:
111
  clean = remove_code_block(answer)
@@ -116,90 +105,82 @@ def generate_code(
116
 
117
  history.append((prompt, code))
118
  chat_msgs = history_to_chatbot_messages(history)
119
-
120
  return code, history, preview, chat_msgs
121
 
 
 
122
 
123
- # ── UI ---------------------------------------------------------------------
124
- THEME = gr.themes.Base(primary_hue="indigo", font="Inter")
125
-
126
- with gr.Blocks(theme=THEME, title="AnyCoderΒ AI") as demo:
127
- state_hist = gr.State([]) # History list
128
- state_model = gr.State(AVAILABLE_MODELS[0])
129
 
130
- # ––– Header with logo –––
131
- with gr.Row():
132
- gr.HTML(
133
- '<div style="text-align:center; margin:1.2rem 0;">'
134
- '<img src="assets/logo.png" alt="AnyCoder logo" style="width:120px;"><br>'
135
- '<h1 style="margin:0.4rem 0 0; font-size:1.9rem;">AnyCoderΒ AI</h1>'
136
- '<p style="color:#555;">Your AI partner for generating, modifying &amp; understanding code.</p>'
137
- '</div>'
138
- )
139
 
140
  with gr.Row():
141
- # ── Sidebar (column‑1) ───────────────────────────────────────────
142
  with gr.Column(scale=1):
143
- gr.Markdown("### 1β€―Β·β€―Selectβ€―Model")
144
- dd_model = gr.Dropdown(
145
- [m["name"] for m in AVAILABLE_MODELS],
146
- value=AVAILABLE_MODELS[0]["name"],
147
- label="AIΒ Model",
148
- )
149
 
150
- gr.Markdown("### 2β€―Β·β€―Provideβ€―Context")
151
  with gr.Tabs():
152
  with gr.Tab("Prompt"):
153
- tb_prompt = gr.Textbox(lines=6, placeholder="Describe what you want to build…")
154
  with gr.Tab("File"):
155
  fi_file = gr.File()
156
  with gr.Tab("Website"):
157
  tb_url = gr.Textbox(placeholder="https://example.com")
158
 
159
- gr.Markdown("### 3β€―Β·β€―Configureβ€―Output")
160
  dd_lang = gr.Dropdown(
161
- GRADIO_SUPPORTED_LANGUAGES[:-1], # drop trailing None
162
  value="html",
163
- label="TargetΒ Language",
164
  )
165
- cb_search = gr.Checkbox(label="Enable Tavily WebΒ Search")
166
 
167
  with gr.Row():
168
- btn_clear = gr.Button("Clear Session", variant="secondary")
169
- btn_gen = gr.Button("GenerateΒ Code", variant="primary")
170
 
171
- # ── Output / preview (column‑2) ──────────────────────────────────
172
  with gr.Column(scale=2):
173
  with gr.Tabs():
174
  with gr.Tab("Code"):
175
- code_out = gr.Code(language="html", lines=25, label="Generated code")
176
  with gr.Tab("Preview"):
177
  html_prev = gr.HTML()
178
  with gr.Tab("History"):
179
  chat_out = gr.Chatbot(type="messages", height=400)
180
 
181
- # ––– Quick‑start buttons –––
182
- gr.Markdown("#### QuickΒ StartΒ Examples")
183
  with gr.Row():
184
- for demo in DEMO_LIST[:6]:
185
- gr.Button(demo["title"], size="sm").click(
186
- lambda d=demo: d["description"], outputs=tb_prompt
187
  )
188
 
189
- # ── Callbacks -----------------------------------------------------------
190
- def _select_model(name: str) -> ModelInfo:
191
- return next((m for m in AVAILABLE_MODELS if m["name"] == name), AVAILABLE_MODELS[0])
192
 
193
- dd_model.change(_select_model, dd_model, state_model)
194
  btn_gen.click(
195
  generate_code,
196
- inputs=[tb_prompt, fi_file, tb_url,
197
- state_model, dd_lang, cb_search, state_hist],
198
- outputs=[code_out, state_hist, html_prev, chat_out],
199
  )
 
200
  btn_clear.click(
201
  lambda: ("", None, "", [], [], "", ""),
202
- outputs=[tb_prompt, fi_file, tb_url, state_hist, chat_out, code_out, html_prev],
203
  queue=False,
204
  )
205
 
 
1
  """
2
+ app.py – AnyCoderΒ AI (Gradio)
3
 
4
+ * Logo: assets/logo.png
5
+ * Models: full list from constants.AVAILABLE_MODELS
6
+ * No height= arg on gr.Code (Gradio β‰₯5)
7
  """
8
 
9
  from __future__ import annotations
 
10
  import gradio as gr
11
+ from typing import List, Tuple, Dict, Optional
12
 
13
+ # ── local modules ──────────────────────────────────────────────────────────
14
+ from constants import (
 
15
  HTML_SYSTEM_PROMPT, HTML_SYSTEM_PROMPT_WITH_SEARCH,
16
  TRANSFORMERS_JS_SYSTEM_PROMPT, TRANSFORMERS_JS_SYSTEM_PROMPT_WITH_SEARCH,
17
  GENERIC_SYSTEM_PROMPT, GENERIC_SYSTEM_PROMPT_WITH_SEARCH,
18
+ TransformersJSFollowUpSystemPrompt, FollowUpSystemPrompt,
19
+ AVAILABLE_MODELS, DEMO_LIST, get_gradio_language,
 
 
20
  )
21
+ from hf_client import get_inference_client
22
+ from tavily_search import enhance_query_with_search
23
+ from utils import (
 
24
  extract_text_from_file, extract_website_content,
25
  history_to_messages, history_to_chatbot_messages,
26
+ remove_code_block, parse_transformers_js_output,
27
+ format_transformers_js_output,
28
  )
29
+ from search_replace import ( # <-- moved here
30
+ apply_search_replace_changes,
31
+ apply_transformers_js_search_replace_changes,
32
+ )
33
+ from deploy import send_to_sandbox
34
 
35
+ # ── aliases ────────────────────────────────────────────────────────────────
36
  History = List[Tuple[str, str]]
37
+ Model = Dict[str, str]
38
 
39
+ # ── code generation core ───────────────────────────────────────────────────
40
  def generate_code(
41
  prompt: str,
42
  file_path: Optional[str],
43
  website_url: Optional[str],
44
+ model: Model,
45
  language: str,
46
  enable_search: bool,
47
  history: Optional[History],
48
+ ):
 
49
  history = history or []
50
+ prompt = prompt or ""
51
 
52
+ # choose system prompt
53
  if history:
54
+ system_prompt = (
55
+ TransformersJSFollowUpSystemPrompt if language == "transformers.js"
56
+ else FollowUpSystemPrompt
57
+ )
 
58
  else:
 
59
  if language == "html":
60
  system_prompt = HTML_SYSTEM_PROMPT_WITH_SEARCH if enable_search else HTML_SYSTEM_PROMPT
61
  elif language == "transformers.js":
 
68
 
69
  messages = history_to_messages(history, system_prompt)
70
 
71
+ # attach context
72
  if file_path:
73
+ prompt += f"\n\n[File]\n{extract_text_from_file(file_path)[:5000]}"
 
 
74
  if website_url:
75
+ prompt += f"\n\n[Website]\n{extract_website_content(website_url)[:8000]}"
 
76
 
77
+ messages.append({"role": "user", "content": enhance_query_with_search(prompt, enable_search)})
 
 
78
 
79
+ # call model
80
  client = get_inference_client(model["id"])
81
  try:
82
  resp = client.chat.completions.create(
83
  model=model["id"],
84
  messages=messages,
85
+ max_tokens=16000,
86
  temperature=0.1,
87
  )
88
  answer = resp.choices[0].message.content
89
  except Exception as e:
90
  err = f"❌ **Error:**\n```\n{e}\n```"
91
  history.append((prompt, err))
92
+ return err, history, "", history_to_chatbot_messages(history)
93
 
94
+ # post‑process
95
  if language == "transformers.js":
96
+ files = parse_transformers_js_output(answer)
97
+ code = format_transformers_js_output(files)
98
  preview = send_to_sandbox(files["index.html"]) if files["index.html"] else ""
99
  else:
100
  clean = remove_code_block(answer)
 
105
 
106
  history.append((prompt, code))
107
  chat_msgs = history_to_chatbot_messages(history)
 
108
  return code, history, preview, chat_msgs
109
 
110
+ # ── UI ─────────────────────────────────────────────────────────────────────
111
+ theme = gr.themes.Base(primary_hue="indigo", font="Inter")
112
 
113
+ with gr.Blocks(theme=theme, title="AnyCoderΒ AI") as demo:
114
+ st_hist = gr.State([])
115
+ st_model = gr.State(AVAILABLE_MODELS[0])
 
 
 
116
 
117
+ # header with logo
118
+ gr.HTML(
119
+ '<div style="text-align:center;margin:1rem 0;">'
120
+ '<img src="assets/logo.png" alt="logo" style="width:120px;"><br>'
121
+ '<h1 style="margin:0.4rem 0 0">AnyCoderΒ AI</h1>'
122
+ '<p style="color:#555">Your AI partner for generating, modifying &amp; understanding code.</p>'
123
+ '</div>'
124
+ )
 
125
 
126
  with gr.Row():
 
127
  with gr.Column(scale=1):
128
+ gr.Markdown("### 1 Β· Model")
129
+ dd_model = gr.Dropdown([m["name"] for m in AVAILABLE_MODELS],
130
+ value=AVAILABLE_MODELS[0]["name"],
131
+ label="AIΒ Model")
 
 
132
 
133
+ gr.Markdown("### 2 Β· Context")
134
  with gr.Tabs():
135
  with gr.Tab("Prompt"):
136
+ tb_prompt = gr.Textbox(lines=6, placeholder="Describe what you want…")
137
  with gr.Tab("File"):
138
  fi_file = gr.File()
139
  with gr.Tab("Website"):
140
  tb_url = gr.Textbox(placeholder="https://example.com")
141
 
142
+ gr.Markdown("### 3 Β· Output")
143
  dd_lang = gr.Dropdown(
144
+ [l for l in get_gradio_language.__defaults__[0] if l], # supported list
145
  value="html",
146
+ label="Target language"
147
  )
148
+ cb_search = gr.Checkbox(label="Enable Tavily Webβ€―Search")
149
 
150
  with gr.Row():
151
+ btn_clear = gr.Button("Clear", variant="secondary")
152
+ btn_gen = gr.Button("Generateβ€―Code", variant="primary")
153
 
 
154
  with gr.Column(scale=2):
155
  with gr.Tabs():
156
  with gr.Tab("Code"):
157
+ code_out = gr.Code(language="html", lines=25)
158
  with gr.Tab("Preview"):
159
  html_prev = gr.HTML()
160
  with gr.Tab("History"):
161
  chat_out = gr.Chatbot(type="messages", height=400)
162
 
163
+ # quick demos
164
+ gr.Markdown("#### QuickΒ Start")
165
  with gr.Row():
166
+ for d in DEMO_LIST[:6]:
167
+ gr.Button(d["title"], size="sm").click(
168
+ lambda desc=d["description"]: desc, outputs=tb_prompt
169
  )
170
 
171
+ # callbacks
172
+ dd_model.change(lambda n: next(m for m in AVAILABLE_MODELS if m["name"] == n),
173
+ dd_model, st_model)
174
 
 
175
  btn_gen.click(
176
  generate_code,
177
+ inputs=[tb_prompt, fi_file, tb_url, st_model, dd_lang, cb_search, st_hist],
178
+ outputs=[code_out, st_hist, html_prev, chat_out],
 
179
  )
180
+
181
  btn_clear.click(
182
  lambda: ("", None, "", [], [], "", ""),
183
+ outputs=[tb_prompt, fi_file, tb_url, st_hist, chat_out, code_out, html_prev],
184
  queue=False,
185
  )
186