mgbam committed on
Commit
e264e9a
·
verified ·
1 Parent(s): 2892395

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -117
app.py CHANGED
@@ -1,127 +1,37 @@
1
- """
2
- AnyCoder AI — static‑first UI wrapper
3
- Loads HTML/CSS/JS from the /static folder and exposes /run/predict for
4
- the front‑end to call.
5
-
6
- • static/index.html dark themed UI
7
- • static/style.css styles
8
- • static/index.js JS logic (model list, fetch /run/predict)
9
-
10
- Back‑end helpers (models.py, inference.py, plugins.py …) are unchanged.
11
- """
12
-
13
  from pathlib import Path
14
- from typing import List, Tuple
15
-
16
  import gradio as gr
17
-
18
- # ---------- imports that actually do the work ----------
19
- from inference import chat_completion # runs the model
20
  from tavily_search import enhance_query_with_search
21
- from utils import ( # misc helpers
22
- extract_text_from_file,
23
- extract_website_content,
24
- history_to_messages,
25
- history_to_chatbot_messages,
26
- apply_search_replace_changes,
27
- remove_code_block,
28
- parse_transformers_js_output,
29
- format_transformers_js_output,
30
- )
31
- from models import AVAILABLE_MODELS, find_model, ModelInfo
32
- # -------------------------------------------------------
33
-
34
- SYSTEM_PROMPTS = {
35
- "html": (
36
- "ONLY USE HTML, CSS AND JAVASCRIPT. Return **one** HTML file "
37
- "wrapped in ```html```."
38
- ),
39
- "transformers.js": (
40
- "Generate THREE fenced blocks: index.html, index.js, style.css."
41
- ),
42
- }
43
-
44
- History = List[Tuple[str, str]]
45
-
46
- # ------------------------------------------------------------------
47
- # /run/predict — called by static/index.js
48
- # ------------------------------------------------------------------
49
- def generate(
50
- prompt: str,
51
- file_path: str | None,
52
- website_url: str | None,
53
- model_id: str,
54
- language: str,
55
- enable_search: bool,
56
- history: History | None,
57
- ):
58
- history = history or []
59
-
60
- # 1 · system + user messages
61
- sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
62
- msgs = history_to_messages(history, sys_prompt)
63
-
64
- parts = [prompt.strip()]
65
-
66
- if file_path:
67
- parts.append(extract_text_from_file(file_path)[:5_000])
68
-
69
- if website_url:
70
- html = extract_website_content(website_url)
71
- if not html.startswith("Error"):
72
- parts.append(html[:8_000])
73
-
74
- user_query = enhance_query_with_search("\n\n".join(filter(None, parts)), enable_search)
75
- msgs.append({"role": "user", "content": user_query})
76
-
77
- # 2 · run model
78
- model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
79
- reply = chat_completion(model.id, msgs)
80
-
81
- # 3 · post‑process
82
- if language == "transformers.js":
83
- files = parse_transformers_js_output(reply)
84
- code = format_transformers_js_output(files)
85
- else:
86
- cleaned = remove_code_block(reply)
87
- if history and not history[-1][1].startswith("❌"):
88
- cleaned = apply_search_replace_changes(history[-1][1], cleaned)
89
- code = cleaned
90
-
91
- history.append((prompt, code))
92
  return code, history
93
 
94
-
95
- # ------------------------------------------------------------------
96
- # Serve static UI
97
- # ------------------------------------------------------------------
98
- HTML_SOURCE = Path("static/index.html").read_text(encoding="utf‑8")
99
 
100
  with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
101
- # Front‑end
102
- gr.HTML(HTML_SOURCE)
103
-
104
- # Hidden components for API
105
- prompt_in = gr.Textbox(visible=False)
106
- file_in = gr.File(visible=False)
107
- url_in = gr.Textbox(visible=False)
108
- model_in = gr.Textbox(visible=False)
109
- lang_in = gr.Textbox(visible=False)
110
- search_in = gr.Checkbox(visible=False)
111
- hist_state = gr.State([])
112
-
113
- code_out = gr.Textbox(visible=False)
114
- hist_out = gr.State([])
115
 
116
- # Expose /run/predict
117
- dummy_btn = gr.Button(visible=False)
118
- dummy_btn.click(
119
- fn=generate,
120
- inputs=[prompt_in, file_in, url_in, model_in, lang_in, search_in, hist_state],
121
- outputs=[code_out, hist_out],
122
- api_name="predict",
123
- queue=True,
124
- )
125
 
126
  if __name__ == "__main__":
127
- demo.queue().launch()
 
1
+ # app.py (root)
 
 
 
 
 
 
 
 
 
 
 
2
  from pathlib import Path
 
 
3
  import gradio as gr
4
+ from inference import chat_completion # your back‑end helpers
 
 
5
  from tavily_search import enhance_query_with_search
6
+ from utils import (extract_text_from_file, extract_website_content,
7
+ history_to_messages, apply_search_replace_changes,
8
+ remove_code_block, parse_transformers_js_output,
9
+ format_transformers_js_output)
10
+ from models import AVAILABLE_MODELS, find_model
11
+ from deploy import send_to_sandbox
12
+
13
+ # ---------- backend callback -------------
14
+ def generate(payload):
15
+ prompt = payload["prompt"]
16
+ model = find_model(payload["model_id"])
17
+ lang = payload["language"]
18
+ history = payload.get("history", [])
19
- # same logic as before, returns the (code, history) tuple
- # (keep your existing generate() from the compact app.py)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  return code, history
22
 
23
+ # ---------- UI ---------------------------
24
+ INDEX = Path("static/index.html").read_text(encoding="utf-8")
 
 
 
25
 
26
  with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
27
+ # custom front‑end
28
+ gr.HTML(INDEX, unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
+ # hidden API endpoints
31
+ generator = gr.JSON(
32
+ label="hidden",
33
+ visible=False
34
+ ).api(generate, path="/run/predict", methods=["POST"])
 
 
 
 
35
 
36
  if __name__ == "__main__":
37
+ demo.launch()