mgbam committed on
Commit
13a7675
Β·
verified Β·
1 Parent(s): 48f06a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -179
app.py CHANGED
@@ -1,201 +1,140 @@
1
  # app.py
2
-
3
  """
4
- Main application file for SHASHA AI, a Gradio-based AI code generation tool.
 
 
 
 
 
5
 
6
- Provides a UI for generating code in many languages using various AI models.
7
- Supports text prompts, file uploads, website scraping, optional web search,
8
- and live previews of HTML output.
9
  """
10
 
 
 
 
11
  import gradio as gr
12
- from typing import Optional, Dict, List, Tuple, Any
13
-
14
- # --- Local module imports ---
15
- from constants import (
16
- HTML_SYSTEM_PROMPT,
17
- TRANSFORMERS_JS_SYSTEM_PROMPT,
18
- AVAILABLE_MODELS,
19
- DEMO_LIST,
20
- )
21
- from hf_client import get_inference_client
22
  from tavily_search import enhance_query_with_search
23
- from utils import (
 
 
24
  extract_text_from_file,
25
  extract_website_content,
26
- apply_search_replace_changes,
27
  history_to_messages,
28
- history_to_chatbot_messages,
29
  remove_code_block,
30
  parse_transformers_js_output,
31
  format_transformers_js_output,
32
  )
33
- from deploy import send_to_sandbox
34
-
35
- # --- Type aliases ---
36
- History = List[Tuple[str, str]]
37
- Model = Dict[str, Any]
38
-
39
- # --- Supported languages for dropdown ---
40
- SUPPORTED_LANGUAGES = [
41
- "python", "c", "cpp", "markdown", "latex", "json", "html", "css",
42
- "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
43
- "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite",
44
- "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql",
45
- "sql-gpSQL", "sql-sparkSQL", "sql-esper"
46
- ]
47
-
48
def get_model_details(name: str) -> Optional[Model]:
    """Look up a model entry in AVAILABLE_MODELS by its display name.

    Args:
        name: The human-readable model name shown in the dropdown.

    Returns:
        The matching model dict, or None when no entry has that name.
    """
    return next((model for model in AVAILABLE_MODELS if model["name"] == name), None)
53
-
54
def generation_code(
    query: Optional[str],
    file: Optional[str],
    website_url: Optional[str],
    current_model: Model,
    enable_search: bool,
    language: str,
    history: Optional[History],
) -> Tuple[str, History, str, List[Dict[str, str]]]:
    """Run one code-generation turn against the selected model.

    Builds a system prompt for the target language, folds in optional file /
    website context and web-search enrichment, calls the inference client,
    and post-processes the model output for display.

    Args:
        query: Free-text user request (may be None/empty).
        file: Optional path of an uploaded file to include as context.
        website_url: Optional URL whose scraped content is added as context.
        current_model: Model dict with at least "id".
        enable_search: Whether to enhance the query via web search.
        language: Target output language (e.g. "html", "transformers.js").
        history: Prior (user, assistant) turns; None is treated as empty.

    Returns:
        Tuple of (generated code, updated history, HTML preview, chatbot
        messages). On model failure the code/preview are empty and the error
        is appended to the history instead.
    """
    query = query or ""
    history = history or []
    try:
        # Language-specific system prompts; anything unrecognized gets a
        # generic "expert developer" prompt.
        if language == "html":
            system_prompt = HTML_SYSTEM_PROMPT
        elif language == "transformers.js":
            system_prompt = TRANSFORMERS_JS_SYSTEM_PROMPT
        else:
            system_prompt = (
                f"You are an expert {language} developer. "
                f"Write clean, idiomatic {language} code based on the user's request."
            )

        model_id = current_model["id"]
        # Route to a provider based on the model id prefix; "auto" lets the
        # client decide.
        if model_id.startswith("openai/") or model_id in {"gpt-4", "gpt-3.5-turbo"}:
            provider = "openai"
        elif model_id.startswith("gemini/") or model_id.startswith("google/"):
            provider = "gemini"
        elif model_id.startswith("fireworks-ai/"):
            provider = "fireworks-ai"
        else:
            provider = "auto"

        # Assemble the chat transcript: prior turns, then the enriched query.
        messages = history_to_messages(history, system_prompt)
        context = query
        if file:
            file_text = extract_text_from_file(file)
            context += f"\n\n[Attached file]\n{file_text[:5000]}"
        if website_url:
            site_text = extract_website_content(website_url)
            # Scraper signals failure via an "Error"-prefixed string.
            if not site_text.startswith("Error"):
                context += f"\n\n[Website content]\n{site_text[:8000]}"
        final_query = enhance_query_with_search(context, enable_search)
        messages.append({"role": "user", "content": final_query})

        # Single non-streaming completion call.
        client = get_inference_client(model_id, provider)
        response = client.chat.completions.create(
            model=model_id,
            messages=messages,
            max_tokens=16000,
            temperature=0.1
        )
        content = response.choices[0].message.content
    except Exception as e:
        # Surface the failure in the chat history rather than crashing the UI.
        err = f"❌ **Error:**\n```\n{e}\n```"
        history.append((query, err))
        return "", history, "", history_to_chatbot_messages(history)

    # Post-process the raw model output per language.
    if language == "transformers.js":
        files = parse_transformers_js_output(content)
        code = format_transformers_js_output(files)
        preview = send_to_sandbox(files.get("index.html", ""))
    else:
        cleaned = remove_code_block(content)
        # If the previous turn produced code (and not an error), treat the new
        # output as search/replace edits applied to it.
        if history and history[-1][1] and not history[-1][1].startswith("❌"):
            code = apply_search_replace_changes(history[-1][1], cleaned)
        else:
            code = cleaned
        preview = send_to_sandbox(code) if language == "html" else ""

    new_history = history + [(query, code)]
    chat_messages = history_to_chatbot_messages(new_history)
    return code, new_history, preview, chat_messages
133
-
134
# --- Custom CSS ---
CUSTOM_CSS = """
body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; }
#main_title { text-align: center; font-size: 2.5rem; margin-top: 1.5rem; }
#subtitle { text-align: center; color: #4a5568; margin-bottom: 2.5rem; }
.gradio-container { background-color: #f7fafc; }
#gen_btn { box-shadow: 0 4px 6px rgba(0,0,0,0.1); }
"""

# Build the Gradio UI: model picker + context inputs on the left,
# code / preview / history tabs on the right.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue"), css=CUSTOM_CSS, title="Shasha AI") as demo:
    # Server-side session state: conversation history and the selected model.
    history_state = gr.State([])
    initial_model = AVAILABLE_MODELS[0]
    model_state = gr.State(initial_model)

    gr.Markdown("# πŸš€ Shasha AI", elem_id="main_title")
    gr.Markdown("Your AI partner for generating, modifying, and understanding code.", elem_id="subtitle")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 1. Select Model")
            model_dd = gr.Dropdown(
                choices=[m["name"] for m in AVAILABLE_MODELS],
                value=initial_model["name"],
                label="AI Model"
            )

            gr.Markdown("### 2. Provide Context")
            with gr.Tabs():
                with gr.Tab("πŸ“ Prompt"):
                    prompt_in = gr.Textbox(lines=7, placeholder="Describe your request...", show_label=False)
                with gr.Tab("πŸ“„ File"):
                    file_in = gr.File(type="filepath")
                with gr.Tab("🌐 Website"):
                    url_in = gr.Textbox(placeholder="https://example.com")

            gr.Markdown("### 3. Configure Output")
            lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES, value="html", label="Target Language")
            search_chk = gr.Checkbox(label="Enable Web Search")

            with gr.Row():
                clr_btn = gr.Button("Clear Session", variant="secondary")
                gen_btn = gr.Button("Generate Code", variant="primary", elem_id="gen_btn")

        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("πŸ’» Code"):
                    code_out = gr.Code(language="html", interactive=True)
                with gr.Tab("πŸ‘οΈ Live Preview"):
                    preview_out = gr.HTML()
                with gr.Tab("πŸ“œ History"):
                    chat_out = gr.Chatbot(type="messages")

    # Keep model_state in sync with the dropdown; fall back to the default
    # model if the name lookup fails.
    model_dd.change(lambda n: get_model_details(n) or initial_model, inputs=[model_dd], outputs=[model_state])

    gen_btn.click(
        fn=generation_code,
        inputs=[prompt_in, file_in, url_in, model_state, search_chk, lang_dd, history_state],
        outputs=[code_out, history_state, preview_out, chat_out],
    )

    # Reset every input/output component; queue=False so the clear is instant.
    clr_btn.click(
        lambda: ("", None, "", [], "", "", []),
        outputs=[prompt_in, file_in, url_in, history_state, code_out, preview_out, chat_out],
        queue=False,
    )

if __name__ == "__main__":
    demo.queue().launch()
 
1
  # app.py
2
+ # ──────────────────────────────────────────────────────────────────────────────
3
  """
4
+ AnyCoderΒ /Β ShashaΒ AI – Gradio back‑end
5
+
6
+ β€’ Serves the custom front‑end located in static/index.html (+Β static/style.css,
7
+ static/index.js).
8
+ β€’ Exposes ONE httpΒ POST endpoint β†’ /run/predict (Gradio api_name="predict")
9
+ that the browser JS calls to run the model and get generated code.
10
 
11
+ All heavy lifting (model registry, provider routing, web‑search, etc.) lives in
12
+ β€’ models.py, inference.py, utils.py, deploy.py …
 
13
  """
14
 
15
+ from pathlib import Path
16
+ from typing import List, Tuple, Dict
17
+
18
  import gradio as gr
19
+
20
+ # ── Local helpers ────────────────────────────────────────────────────────────
21
+ from inference import chat_completion
 
 
 
 
 
 
 
22
  from tavily_search import enhance_query_with_search
23
+ from deploy import send_to_sandbox
24
+ from models import AVAILABLE_MODELS, find_model, ModelInfo
25
+ from utils import (
26
  extract_text_from_file,
27
  extract_website_content,
 
28
  history_to_messages,
29
+ apply_search_replace_changes,
30
  remove_code_block,
31
  parse_transformers_js_output,
32
  format_transformers_js_output,
33
  )
34
# ── System prompts keyed by language ────────────────────────────────────────
# Languages not listed here fall back to a generic prompt built in generate().
SYSTEM_PROMPTS = {
    "html": (
        "ONLY USE HTML, CSS AND JAVASCRIPT. Return exactly ONE file wrapped in "
        "```html ...```."
    ),
    "transformers.js": (
        "Generate THREE separate files (index.html β€’ index.js β€’ style.css) each "
        "inside its own fenced block."
    ),
}

# ── Output-history data structure ───────────────────────────────────────────
# Conversation state persisted in a gr.State on the server.
History = List[Tuple[str, str]]  # [(user_query, generated_code), …]
49
+
50
# ════════════════════════════════════════════════════════════════════════════
# 1. Backend callback hit by the JS front-end
# ════════════════════════════════════════════════════════════════════════════
def generate(
    prompt: str,
    file_path: str | None,
    website_url: str | None,
    model_id: str,
    language: str,
    web_search: bool,
    history: History | None,
) -> Dict[str, str]:
    """
    The only public API. Returns: { "code": <string> }

    Args:
        prompt: Free-text request from the front-end (may arrive as None).
        file_path: Optional uploaded-file path added as context.
        website_url: Optional URL whose scraped content is added as context.
        model_id: Identifier resolved via find_model(); falls back to the
            first entry of AVAILABLE_MODELS when unknown.
        language: Target language; selects a system prompt from
            SYSTEM_PROMPTS or a generic fallback.
        web_search: Whether to enrich the query via web search.
        history: Prior (user, code) turns; None is treated as empty.
    """
    history = history or []

    # ---- Build system + user messages --------------------------------------
    sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
    messages = history_to_messages(history, sys_prompt)

    # FIX: prompt may be None (front-end sends no text) — the previous code
    # called prompt.strip() directly and raised AttributeError.
    ctx: list[str] = [(prompt or "").strip()]

    if file_path:
        ctx.append("[File]")
        ctx.append(extract_text_from_file(file_path)[:5_000])

    if website_url:
        html = extract_website_content(website_url)
        # Scraper signals failure via an "Error"-prefixed string.
        if not html.startswith("Error"):
            ctx.append("[Website]")
            ctx.append(html[:8_000])

    user_query = "\n\n".join(filter(None, ctx))
    user_query = enhance_query_with_search(user_query, web_search)
    messages.append({"role": "user", "content": user_query})

    # ---- Call model --------------------------------------------------------
    model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
    assistant = chat_completion(model.id, messages)

    # ---- Post-process output ----------------------------------------------
    if language == "transformers.js":
        files = parse_transformers_js_output(assistant)
        code = format_transformers_js_output(files)
    else:
        clean = remove_code_block(assistant)
        # FIX: only treat the output as search/replace edits when the previous
        # turn actually produced code — history[-1][1] may be empty/None, and
        # the old unconditional .startswith() call would crash on None.
        if history and history[-1][1] and not history[-1][1].startswith("❌"):
            clean = apply_search_replace_changes(history[-1][1], clean)
        code = clean

    # (preview iframe is rendered entirely client-side from code)
    return {"code": code}
103
+
104
+
105
# ════════════════════════════════════════════════════════════════════════════
# 2. Gradio wrapper
# ════════════════════════════════════════════════════════════════════════════
# Load the hand-written front-end once at startup.
INDEX_HTML = Path("static/index.html").read_text(encoding="utf-8")

with gr.Blocks(css="body{margin:0}", title="AnyCoderΒ AI") as demo:
    # 2-a  Serve the custom UI verbatim.
    gr.HTML(INDEX_HTML)

    # 2-b  Hidden components that together form a JSON REST endpoint: the
    # browser JS posts to /run/predict and reads back {"code": ...}.
    with gr.Group(visible=False) as api_group:
        prompt_in = gr.Textbox()
        file_in = gr.File()
        url_in = gr.Textbox()
        model_in = gr.Textbox()
        lang_in = gr.Textbox()
        search_in = gr.Checkbox()
        hist_state = gr.State([])  # persists conversation on server

        code_out = gr.JSON(label="code")  # returns {"code": "..."} to the JS

        run_btn = gr.Button(visible=False)
        run_btn.click(
            fn=generate,
            inputs=[
                prompt_in, file_in, url_in,
                model_in, lang_in, search_in, hist_state
            ],
            outputs=[code_out],
            api_name="predict",  # <─ POST /run/predict
        )

# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo.queue().launch()