mgbam commited on
Commit
a18bd58
·
verified ·
1 Parent(s): 0d7841a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -54
app.py CHANGED
@@ -1,64 +1,89 @@
1
- # app.py ── root of the repo
2
  """
3
- AnyCoder / Shasha AI Gradio back‑end
4
- Hosts the custom HTML/JS/CSS in /static
5
- • Exposes POST /run/predict for the browser-side fetch()
 
 
 
 
 
 
6
  """
7
- from __future__ import annotations
8
  from pathlib import Path
9
  from typing import List, Tuple
10
 
11
  import gradio as gr
12
 
13
- from inference import chat_completion
14
- from tavily_search import enhance_query_with_search
15
- from models import AVAILABLE_MODELS, find_model, ModelInfo
16
- from utils import (
17
- extract_text_from_file, extract_website_content,
18
- history_to_messages, history_to_chatbot_messages,
19
- apply_search_replace_changes, remove_code_block,
20
- parse_transformers_js_output, format_transformers_js_output,
 
 
 
 
21
  )
 
 
22
 
23
  SYSTEM_PROMPTS = {
24
- "html": "ONLY USE HTML, CSS & JS. Return ONE file wrapped in ```html```.",
25
- "transformers.js":"Generate THREE files (index.html / index.js / style.css) as fenced blocks."
 
 
 
 
 
26
  }
 
27
  History = List[Tuple[str, str]]
28
 
29
- # ─────────────────────────────────────────────────────────────────────────────
30
- def generate(prompt:str,
31
- file_path:str|None,
32
- website_url:str|None,
33
- model_id:str,
34
- language:str,
35
- enable_search:bool,
36
- history:History|None) -> Tuple[str,History]:
37
- """Invoked by the JS front‑end."""
 
 
 
38
  history = history or []
 
 
39
  sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
40
- messages = history_to_messages(history, sys_prompt)
 
 
41
 
42
- ctx: list[str] = [prompt.strip()]
43
  if file_path:
44
- ctx.append("[File]\n" + extract_text_from_file(file_path)[:5_000])
 
45
  if website_url:
46
  html = extract_website_content(website_url)
47
  if not html.startswith("Error"):
48
- ctx.append("[Website]\n" + html[:8_000])
49
 
50
- user_q = "\n\n".join(filter(None, ctx))
51
- user_q = enhance_query_with_search(user_q, enable_search)
52
- messages.append({"role": "user", "content": user_q})
53
 
 
54
  model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
55
- answer = chat_completion(model.id, messages)
56
 
 
57
  if language == "transformers.js":
58
- files = parse_transformers_js_output(answer)
59
- code = format_transformers_js_output(files)
60
  else:
61
- cleaned = remove_code_block(answer)
62
  if history and not history[-1][1].startswith("❌"):
63
  cleaned = apply_search_replace_changes(history[-1][1], cleaned)
64
  code = cleaned
@@ -66,28 +91,36 @@ def generate(prompt:str,
66
  history.append((prompt, code))
67
  return code, history
68
 
69
- # ─────────────────────────────────────────────────────────────────────────────
70
- HTML_SOURCE = Path("static/index.html").read_text(encoding="utf-8")
 
 
 
71
 
72
  with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
73
- gr.HTML(HTML_SOURCE) # the whole UI
74
- # hidden I/O elements for the JS fetch()
75
- with gr.Group(visible=False):
76
- prompt_in = gr.Textbox()
77
- file_in = gr.File()
78
- url_in = gr.Textbox()
79
- model_in = gr.Textbox()
80
- lang_in = gr.Textbox()
81
- search_in = gr.Checkbox()
82
- hist_state = gr.State([])
83
- code_out, hist_out = gr.Textbox(), gr.State([])
84
-
85
- gr.Button(visible=False).click( # POST /run/predict
86
- generate,
87
- [prompt_in, file_in, url_in,
88
- model_in, lang_in, search_in, hist_state],
89
- [code_out, hist_out],
 
 
 
 
90
  api_name="predict",
 
91
  )
92
 
93
  if __name__ == "__main__":
 
 
1
  """
2
+ AnyCoder AI — static‑first UI wrapper
3
+ Loads HTML/CSS/JS from the /static folder and exposes /run/predict for
4
+ the frontend to call.
5
+
6
+ • static/index.html dark themed UI
7
+ • static/style.css styles
8
+ • static/index.js JS logic (model list, fetch /run/predict)
9
+
10
+ Back‑end helpers (models.py, inference.py, plugins.py …) are unchanged.
11
  """
12
+
13
  from pathlib import Path
14
  from typing import List, Tuple
15
 
16
  import gradio as gr
17
 
18
+ # ---------- imports that actually do the work ----------
19
+ from inference import chat_completion # runs the model
20
+ from tavily_search import enhance_query_with_search
21
+ from utils import ( # misc helpers
22
+ extract_text_from_file,
23
+ extract_website_content,
24
+ history_to_messages,
25
+ history_to_chatbot_messages,
26
+ apply_search_replace_changes,
27
+ remove_code_block,
28
+ parse_transformers_js_output,
29
+ format_transformers_js_output,
30
  )
31
+ from models import AVAILABLE_MODELS, find_model, ModelInfo
32
+ # -------------------------------------------------------
33
 
34
# Language-specific system prompts. generate() falls back to a generic
# "expert <language> developer" prompt for any language not listed here.
SYSTEM_PROMPTS = {
    "html": "ONLY USE HTML, CSS AND JAVASCRIPT. Return **one** HTML file wrapped in ```html```.",
    "transformers.js": "Generate THREE fenced blocks: index.html, index.js, style.css.",
}

# A chat transcript: ordered (user_prompt, assistant_code) pairs.
History = List[Tuple[str, str]]
45
 
46
+ # ------------------------------------------------------------------
47
+ # /run/predict — called by static/index.js
48
+ # ------------------------------------------------------------------
49
+ def generate(
50
+ prompt: str,
51
+ file_path: str | None,
52
+ website_url: str | None,
53
+ model_id: str,
54
+ language: str,
55
+ enable_search: bool,
56
+ history: History | None,
57
+ ):
58
  history = history or []
59
+
60
+ # 1 · system + user messages
61
  sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
62
+ msgs = history_to_messages(history, sys_prompt)
63
+
64
+ parts = [prompt.strip()]
65
 
 
66
  if file_path:
67
+ parts.append(extract_text_from_file(file_path)[:5_000])
68
+
69
  if website_url:
70
  html = extract_website_content(website_url)
71
  if not html.startswith("Error"):
72
+ parts.append(html[:8_000])
73
 
74
+ user_query = enhance_query_with_search("\n\n".join(filter(None, parts)), enable_search)
75
+ msgs.append({"role": "user", "content": user_query})
 
76
 
77
+ # 2 · run model
78
  model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
79
+ reply = chat_completion(model.id, msgs)
80
 
81
+ # 3 · post‑process
82
  if language == "transformers.js":
83
+ files = parse_transformers_js_output(reply)
84
+ code = format_transformers_js_output(files)
85
  else:
86
+ cleaned = remove_code_block(reply)
87
  if history and not history[-1][1].startswith("❌"):
88
  cleaned = apply_search_replace_changes(history[-1][1], cleaned)
89
  code = cleaned
 
91
  history.append((prompt, code))
92
  return code, history
93
 
94
+
95
+ # ------------------------------------------------------------------
96
+ # Serve static UI
97
+ # ------------------------------------------------------------------
98
+ HTML_SOURCE = Path("static/index.html").read_text(encoding="utf‑8")
99
 
100
  with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
101
+ # Front‑end
102
+ gr.HTML(HTML_SOURCE)
103
+
104
+ # Hidden components for API
105
+ prompt_in = gr.Textbox(visible=False)
106
+ file_in = gr.File(visible=False)
107
+ url_in = gr.Textbox(visible=False)
108
+ model_in = gr.Textbox(visible=False)
109
+ lang_in = gr.Textbox(visible=False)
110
+ search_in = gr.Checkbox(visible=False)
111
+ hist_state = gr.State([])
112
+
113
+ code_out = gr.Textbox(visible=False)
114
+ hist_out = gr.State([])
115
+
116
+ # Expose /run/predict
117
+ dummy_btn = gr.Button(visible=False)
118
+ dummy_btn.click(
119
+ fn=generate,
120
+ inputs=[prompt_in, file_in, url_in, model_in, lang_in, search_in, hist_state],
121
+ outputs=[code_out, hist_out],
122
  api_name="predict",
123
+ queue=True,
124
  )
125
 
126
  if __name__ == "__main__":