mgbam committed on
Commit c558be9 · verified · 1 Parent(s): e264e9a

Update app.py

Files changed (1)
  1. app.py +117 -25
app.py CHANGED
@@ -1,37 +1,129 @@
- # app.py (root)
  from pathlib import Path
  import gradio as gr
- from inference import chat_completion  # your back‑end helpers
  from tavily_search import enhance_query_with_search
- from utils import (extract_text_from_file, extract_website_content,
-                    history_to_messages, apply_search_replace_changes,
-                    remove_code_block, parse_transformers_js_output,
-                    format_transformers_js_output)
- from models import AVAILABLE_MODELS, find_model
  from deploy import send_to_sandbox

- # ---------- backend callback -------------
- def generate(payload):
-     prompt = payload["prompt"]
-     model = find_model(payload["model_id"])
-     lang = payload["language"]
-     history = payload.get("history", [])
-     # … same logic as before, returns {"code": ..., "history": [...] }
-     # (keep your existing generate() from the compact app.py)
      return code, history

- # ---------- UI ---------------------------
- INDEX = Path("static/index.html").read_text(encoding="utf-8")

  with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
-     # custom front‑end
-     gr.HTML(INDEX, unsafe_allow_html=True)

-     # hidden API endpoints
-     generator = gr.JSON(
-         label="hidden",
-         visible=False
-     ).api(generate, path="/run/predict", methods=["POST"])

  if __name__ == "__main__":
-     demo.launch()
+ # app.py
+ # --------------------------------------------------------------------
+ # AnyCoder / Shasha AI – Gradio back‑end
+ # --------------------------------------------------------------------
+ """
+ • Renders the custom front‑end stored in index.html (+ static assets).
+ • Provides one API route (`POST /run/predict`) the JS front‑end calls
+   to run model inference.
+ • Relies on helper modules (inference.py, models.py, utils.py, …)
+   exactly as you already have them.
+ """
+
  from pathlib import Path
+ from typing import List, Tuple
+
  import gradio as gr
+
+ # ── local helpers (unchanged) ────────────────────────────────────────
+ from inference import chat_completion
  from tavily_search import enhance_query_with_search
+ from models import AVAILABLE_MODELS, find_model, ModelInfo
  from deploy import send_to_sandbox
+ from utils import (
+     extract_text_from_file,
+     extract_website_content,
+     history_to_messages,
+     history_to_chatbot_messages,
+     apply_search_replace_changes,
+     remove_code_block,
+     parse_transformers_js_output,
+     format_transformers_js_output,
+ )
+
+ # ── constants ────────────────────────────────────────────────────────
+ History = List[Tuple[str, str]]
+
+ SYSTEM_PROMPTS = {
+     "html": (
+         "ONLY USE HTML, CSS AND JAVASCRIPT. Return ONE html file "
+         "wrapped in ```html ...```."
+     ),
+     "transformers.js": (
+         "Generate THREE separate files (index.html / index.js / style.css) "
+         "as three fenced blocks."
+     ),
+ }
+
+ # ── core back‑end callback ───────────────────────────────────────────
+ def generate(
+     prompt: str,
+     file_path: str | None,
+     website_url: str | None,
+     model_id: str,
+     language: str,
+     enable_search: bool,
+     history: History | None,
+ ) -> Tuple[str, History]:
+     """Backend for /run/predict."""
+     history = history or []
+
+     # 1) system prompt + history
+     system_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
+     messages = history_to_messages(history, system_prompt)
+
+     ctx_parts: list[str] = [prompt.strip()]

+     if file_path:
+         ctx_parts.append("[File]")
+         ctx_parts.append(extract_text_from_file(file_path)[:5_000])
+     if website_url:
+         html = extract_website_content(website_url)
+         if not html.startswith("Error"):
+             ctx_parts.append("[Website]")
+             ctx_parts.append(html[:8_000])
+
+     user_query = "\n\n".join(filter(None, ctx_parts))
+     user_query = enhance_query_with_search(user_query, enable_search)
+     messages.append({"role": "user", "content": user_query})
+
+     # 2) model call
+     model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
+     answer = chat_completion(model.id, messages)
+
+     # 3) post‑processing
+     if language == "transformers.js":
+         files = parse_transformers_js_output(answer)
+         code = format_transformers_js_output(files)
+     else:
+         cleaned = remove_code_block(answer)
+         if history and not history[-1][1].startswith("❌"):
+             cleaned = apply_search_replace_changes(history[-1][1], cleaned)
+         code = cleaned
+
+     history.append((prompt, code))
      return code, history

+ # ── read the custom HTML front‑end ───────────────────────────────────
+ INDEX = Path("index.html").read_text(encoding="utf-8")
+
+ # ── Gradio UI (wrapper only) ─────────────────────────────────────────
  with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
+     # 1 visible: your static front‑end
+     gr.HTML(INDEX)  # ← NO unsafe_allow_html / sanitize
+
+     # 2 hidden components for the API call wiring
+     with gr.Group(visible=False) as api:
+         prompt_in = gr.Textbox()
+         file_in = gr.File()
+         url_in = gr.Textbox()
+         model_in = gr.Textbox()
+         lang_in = gr.Textbox()
+         search_in = gr.Checkbox()
+         hist_state = gr.State([])
+
+         code_out, hist_out = gr.Textbox(), gr.State([])

+     # bind /run/predict
+     trig = gr.Button(visible=False)
+     trig.click(
+         generate,
+         inputs=[prompt_in, file_in, url_in, model_in, lang_in, search_in, hist_state],
+         outputs=[code_out, hist_out],
+         api_name="predict",
+     )

+ # ── static assets (.css / .js) are picked up automatically by HF Spaces
  if __name__ == "__main__":
+     demo.queue().launch()
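
For reference, below is a minimal client-side sketch (not part of this commit) of calling the endpoint that `trig.click(..., api_name="predict")` registers, using the official gradio_client package. The Space URL, prompt, and model id are placeholder assumptions, and gr.State inputs are not always exposed in the public API signature, so view_api() is called first to confirm the exact argument list.

# client_demo.py - hypothetical usage sketch, not part of the commit above.
from gradio_client import Client

client = Client("http://localhost:7860")   # or "<user>/<space-name>" on Hugging Face
client.view_api()                          # prints the exact signature of /predict

# Argument order mirrors the hidden inputs wired into trig.click();
# the gr.State history input may be omitted from the API signature.
result = client.predict(
    "Build a simple landing page",         # prompt_in
    None,                                  # file_in  (no upload)
    "",                                    # url_in
    "placeholder-model-id",                # model_in
    "html",                                # lang_in
    False,                                 # search_in
    api_name="/predict",
)
print(result)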