mgbam committed on
Commit
1c75fd0
·
verified ·
1 Parent(s): 9fb92de

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +155 -146
app.py CHANGED
@@ -1,42 +1,50 @@
1
  # app.py
2
-
 
 
3
  """
4
- Main application file for SHASHA AI, a Gradio-based AI code generation tool.
5
-
6
- Provides a UI for generating code in many languages using various AI models.
7
- Supports text prompts, file uploads, website scraping, optional web search,
8
- and live previews of HTML output.
 
 
 
 
 
 
 
 
 
 
9
  """
10
 
 
 
 
 
11
  import gradio as gr
12
- from typing import Optional, Dict, List, Tuple, Any
13
-
14
- # --- Local module imports ---
15
- from constants import (
16
- HTML_SYSTEM_PROMPT,
17
- TRANSFORMERS_JS_SYSTEM_PROMPT,
18
- AVAILABLE_MODELS,
19
- DEMO_LIST,
20
- )
21
- from hf_client import get_inference_client
22
  from tavily_search import enhance_query_with_search
23
- from utils import (
 
24
  extract_text_from_file,
25
  extract_website_content,
26
- apply_search_replace_changes,
27
- history_to_messages,
28
  history_to_chatbot_messages,
29
- remove_code_block,
30
  parse_transformers_js_output,
31
- format_transformers_js_output,
32
  )
33
- from deploy import send_to_sandbox
34
 
35
- # --- Type aliases ---
36
- History = List[Tuple[str, str]]
37
- Model = Dict[str, Any]
38
 
39
- # --- Supported languages for dropdown ---
40
  SUPPORTED_LANGUAGES = [
41
  "python", "c", "cpp", "markdown", "latex", "json", "html", "css",
42
  "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
@@ -45,157 +53,158 @@ SUPPORTED_LANGUAGES = [
45
  "sql-gpSQL", "sql-sparkSQL", "sql-esper"
46
  ]
47
 
48
def get_model_details(name: str) -> Optional[Model]:
    """Look up a model entry by its display name.

    Returns the matching mapping from AVAILABLE_MODELS, or None when no
    entry carries that name.
    """
    return next((model for model in AVAILABLE_MODELS if model["name"] == name), None)
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
def generation_code(
    query: Optional[str],
    file: Optional[str],
    website_url: Optional[str],
    current_model: Model,
    enable_search: bool,
    language: str,
    history: Optional[History],
) -> Tuple[str, History, str, List[Dict[str, str]]]:
    """Generate code for the user's request and update the session history.

    Builds a system prompt for the target *language*, enriches the query with
    optional file / website context and web search, calls the selected model,
    and post-processes the reply.

    Returns:
        (code, new history, HTML preview, chatbot-formatted history) — the
        4-tuple wired to the Gradio outputs.
    """
    query = query or ""
    history = history or []

    try:
        # Choose system prompt based on language
        if language == "html":
            system_prompt = HTML_SYSTEM_PROMPT
        elif language == "transformers.js":
            system_prompt = TRANSFORMERS_JS_SYSTEM_PROMPT
        else:
            # Generic fallback prompt for all other languages
            system_prompt = (
                f"You are an expert {language} developer. "
                f"Write clean, idiomatic {language} code based on the user's request."
            )

        model_id = current_model["id"]
        # Determine provider from the model-id prefix
        if model_id.startswith("openai/") or model_id in {"gpt-4", "gpt-3.5-turbo"}:
            provider = "openai"
        elif model_id.startswith(("gemini/", "google/")):
            provider = "gemini"
        elif model_id.startswith("fireworks-ai/"):
            provider = "fireworks-ai"
        else:
            provider = "auto"

        # Build message history, then append the enriched user query
        msgs = history_to_messages(history, system_prompt)
        context = query
        if file:
            ftext = extract_text_from_file(file)
            context += f"\n\n[Attached file]\n{ftext[:5000]}"  # cap to keep the prompt bounded
        if website_url:
            wtext = extract_website_content(website_url)
            if not wtext.startswith("Error"):
                context += f"\n\n[Website content]\n{wtext[:8000]}"
        final_q = enhance_query_with_search(context, enable_search)
        msgs.append({"role": "user", "content": final_q})

        # Call the model
        client = get_inference_client(model_id, provider)
        resp = client.chat.completions.create(
            model=model_id,
            messages=msgs,
            max_tokens=16000,
            temperature=0.1,
        )
        content = resp.choices[0].message.content

    except Exception as e:
        err = f"❌ **Error:**\n```\n{e}\n```"
        # FIX: do not mutate the caller-owned Gradio state list in place.
        # The success path builds a fresh list (history + [...]); the error
        # path now does the same so state updates are consistent.
        err_hist = history + [(query, err)]
        return "", err_hist, "", history_to_chatbot_messages(err_hist)

    # Process model output per language
    if language == "transformers.js":
        files = parse_transformers_js_output(content)
        code = format_transformers_js_output(files)
        preview = send_to_sandbox(files.get("index.html", ""))
    else:
        cleaned = remove_code_block(content)
        # Iterative edit: patch the previous (non-error, non-empty) output
        if history and history[-1][1] and not history[-1][1].startswith("❌"):
            code = apply_search_replace_changes(history[-1][1], cleaned)
        else:
            code = cleaned
        preview = send_to_sandbox(code) if language == "html" else ""

    new_hist = history + [(query, code)]
    chat = history_to_chatbot_messages(new_hist)
    return code, new_hist, preview, chat
133
-
134
# --- Custom CSS ---
CUSTOM_CSS = """
body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; }
#main_title { text-align: center; font-size: 2.5rem; margin-top: 1.5rem; }
#subtitle { text-align: center; color: #4a5568; margin-bottom: 2.5rem; }
.gradio-container { background-color: #f7fafc; }
#gen_btn { box-shadow: 0 4px 6px rgba(0,0,0,0.1); }
"""

# --- Application layout -------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue"), css=CUSTOM_CSS, title="Shasha AI") as demo:
    # Per-session state: conversation history and the selected model record.
    history_state = gr.State([])
    initial_model = AVAILABLE_MODELS[0]
    model_state = gr.State(initial_model)

    gr.Markdown("# 🚀 Shasha AI", elem_id="main_title")
    gr.Markdown("Your AI partner for generating, modifying, and understanding code.", elem_id="subtitle")

    with gr.Row():
        # Left column: model picker, context inputs, output options.
        with gr.Column(scale=1):
            gr.Markdown("### 1. Select Model")
            model_dd = gr.Dropdown(
                choices=[model["name"] for model in AVAILABLE_MODELS],
                value=initial_model["name"],
                label="AI Model",
            )

            gr.Markdown("### 2. Provide Context")
            with gr.Tabs():
                with gr.Tab("📝 Prompt"):
                    prompt_in = gr.Textbox(lines=7, placeholder="Describe your request...", show_label=False)
                with gr.Tab("📄 File"):
                    file_in = gr.File(type="filepath")
                with gr.Tab("🌐 Website"):
                    url_in = gr.Textbox(placeholder="https://example.com")

            gr.Markdown("### 3. Configure Output")
            lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES, value="html", label="Target Language")
            search_chk = gr.Checkbox(label="Enable Web Search")

            with gr.Row():
                clear_btn = gr.Button("Clear Session", variant="secondary")
                generate_btn = gr.Button("Generate Code", variant="primary", elem_id="gen_btn")

        # Right column: generated code, live preview, and chat history.
        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("💻 Code"):
                    code_out = gr.Code(language="html", interactive=True)
                with gr.Tab("👁️ Live Preview"):
                    preview_out = gr.HTML()
                with gr.Tab("📜 History"):
                    chat_out = gr.Chatbot(type="messages")

    # Keep model_state in sync with the dropdown; fall back to the default
    # entry when the name cannot be resolved.
    model_dd.change(
        lambda n: get_model_details(n) or initial_model,
        inputs=[model_dd],
        outputs=[model_state],
    )

    generate_btn.click(
        fn=generation_code,
        inputs=[prompt_in, file_in, url_in, model_state, search_chk, lang_dd, history_state],
        outputs=[code_out, history_state, preview_out, chat_out],
    )

    clear_btn.click(
        lambda: ("", None, "", [], "", "", []),
        outputs=[prompt_in, file_in, url_in, history_state, code_out, preview_out, chat_out],
        queue=False,
    )

if __name__ == "__main__":
    demo.queue().launch()
 
1
  # app.py
2
+ # ------------------------------------------------------------------
3
+ # AnyCoder / Shasha AI – Gradio front‑end
4
+ # ------------------------------------------------------------------
5
  """
6
+ A lightweight Gradio UI that lets users:
7
+
8
+ 1. Pick an AI model (OpenAI / Gemini / Groq / HF etc.).
9
+ 2. Provide context via prompt, file upload, or website URL.
10
+ 3. Choose a target language (HTML, Python, JS, …) and optionally enable
11
+ Tavily web‑search enrichment.
12
+ 4. Generate code, show a live HTML preview, and keep a session history.
13
+
14
+ The heavy lifting (provider routing, web‑search merge, code‑post‑processing)
15
+ lives in:
16
+ • models.py – central model registry
17
+ • hf_client.py – provider‑aware InferenceClient factory
18
+ • inference.py – chat_completion / stream_chat_completion
19
+ • utils.py – helpers (file/website extraction, history utils)
20
+ • deploy.py – sandbox renderer & HF Spaces helpers
21
  """
22
 
23
+ from __future__ import annotations
24
+
25
+ from typing import Any, List, Optional, Tuple
26
+
27
  import gradio as gr
28
+
29
+ from deploy import send_to_sandbox
30
+ from inference import chat_completion
31
+ from models import AVAILABLE_MODELS, ModelInfo, find_model
 
 
 
 
 
 
32
  from tavily_search import enhance_query_with_search
33
+ from utils import ( # high‑level utils
34
+ apply_search_replace_changes,
35
  extract_text_from_file,
36
  extract_website_content,
37
+ format_transformers_js_output,
 
38
  history_to_chatbot_messages,
39
+ history_to_messages,
40
  parse_transformers_js_output,
41
+ remove_code_block,
42
  )
 
43
 
44
+ # ------------------------------------------------------------------
45
+ # Configuration
46
+ # ------------------------------------------------------------------
47
 
 
48
  SUPPORTED_LANGUAGES = [
49
  "python", "c", "cpp", "markdown", "latex", "json", "html", "css",
50
  "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
 
53
  "sql-gpSQL", "sql-sparkSQL", "sql-esper"
54
  ]
55
 
56
+
57
# Language-specific system prompts; generation_code falls back to a generic
# "expert {language} developer" prompt for any language not listed here.
SYSTEM_PROMPTS = {
    "html": (
        "ONLY USE HTML, CSS AND JAVASCRIPT. Create a modern, responsive UI. "
        "Return <strong>ONE</strong> HTML file wrapped in ```html ...```."
    ),
    "transformers.js": (
        "You are an expert web developer. Generate THREE separate files "
        "(index.html / index.js / style.css) returned as three fenced blocks."
    ),
}
67
+
68
+
69
+ # ------------------------------------------------------------------
70
+ # Core generation callback
71
+ # ------------------------------------------------------------------
72
+ History = List[Tuple[str, str]]
73
+
74
 
75
def generation_code(
    prompt: str | None,
    file_path: str | None,
    website_url: str | None,
    model_name: str,
    enable_search: bool,
    language: str,
    state_history: History | None,
) -> Tuple[str, History, str, List[dict[str, str]]]:
    """Backend function wired to the ✨ Generate button.

    Builds the chat messages (system prompt + prior turns + enriched user
    query), calls the selected model, post-processes the reply for the chosen
    language, and returns (code, new history, HTML preview, chat messages).
    """
    prompt = (prompt or "").strip()
    history = state_history or []

    # ------------------------------------------------------------------
    # Compose system prompt + context
    # ------------------------------------------------------------------
    sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
    messages = history_to_messages(history, sys_prompt)

    # --- append file / website context --------------------------------
    context_parts: list[str] = [prompt]

    if file_path:
        context_parts.append("[Reference file]")
        context_parts.append(extract_text_from_file(file_path)[:5000])

    if website_url:
        website_html = extract_website_content(website_url)
        if not website_html.startswith("Error"):
            context_parts.append("[Website content]")
            context_parts.append(website_html[:8000])

    user_query = "\n\n".join(filter(None, context_parts))
    user_query = enhance_query_with_search(user_query, enable_search)
    messages.append({"role": "user", "content": user_query})

    # ------------------------------------------------------------------
    # Call model via inference.py – provider routing handled inside
    # ------------------------------------------------------------------
    model: ModelInfo = find_model(model_name) or AVAILABLE_MODELS[0]
    try:
        assistant_reply = chat_completion(model.id, messages)
    except Exception as exc:  # pragma: no cover
        err_msg = f"❌ **Generation error**\n```{exc}```"
        new_history = history + [(prompt, err_msg)]
        return "", new_history, "", history_to_chatbot_messages(new_history)

    # ------------------------------------------------------------------
    # Post-process output
    # ------------------------------------------------------------------
    if language == "transformers.js":
        files = parse_transformers_js_output(assistant_reply)
        code_out = format_transformers_js_output(files)
        preview_html = send_to_sandbox(files.get("index.html", ""))
    else:
        cleaned = remove_code_block(assistant_reply)
        # Search/replace patching for iterative edits.
        # FIX: also require a non-empty previous reply (history[-1][1]) so we
        # never attempt to patch against "" — the earlier revision had this
        # guard and dropping it lets empty prior output reach the patcher.
        if history and history[-1][1] and not history[-1][1].startswith("❌"):
            cleaned = apply_search_replace_changes(history[-1][1], cleaned)
        code_out = cleaned
        preview_html = send_to_sandbox(cleaned) if language == "html" else ""

    new_history = history + [(prompt, code_out)]
    chat_history = history_to_chatbot_messages(new_history)
    return code_out, new_history, preview_html, chat_history
 
140
 
 
 
141
 
142
# ------------------------------------------------------------------
# Gradio UI
# ------------------------------------------------------------------
THEME = gr.themes.Soft(primary_hue="blue")

with gr.Blocks(theme=THEME, title="AnyCoder / Shasha AI") as demo:
    # Per-session conversation state: list of (prompt, reply) tuples.
    state_history = gr.State([])

    # -------------------- sidebar (inputs) ---------------------------
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 1 · Model")
            model_dd = gr.Dropdown(
                choices=[model.name for model in AVAILABLE_MODELS],
                value=AVAILABLE_MODELS[0].name,
                label="AI Model",
            )

            gr.Markdown("### 2 · Context")
            with gr.Tabs():
                with gr.Tab("Prompt"):
                    prompt_box = gr.Textbox(lines=6, placeholder="Describe what you need...")
                with gr.Tab("File"):
                    file_box = gr.File(type="filepath")
                with gr.Tab("Website"):
                    url_box = gr.Textbox(placeholder="https://example.com")

            gr.Markdown("### 3 · Output")
            lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES, value="html", label="Language")
            search_chk = gr.Checkbox(label="Enable Tavily Web Search")

            with gr.Row():
                clear_btn = gr.Button("Clear", variant="secondary")
                gen_btn = gr.Button("Generate ", variant="primary")

        # -------------------- main panel (outputs) --------------------
        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("Code"):
                    code_out = gr.Code(interactive=True)
                with gr.Tab("Preview"):
                    preview_out = gr.HTML()
                with gr.Tab("History"):
                    chat_out = gr.Chatbot(type="messages")

    # -------------------- callbacks ----------------------------------
    # Model name, search flag, and language flow straight into the backend;
    # generation_code resolves the name via find_model itself.
    gen_btn.click(
        generation_code,
        inputs=[prompt_box, file_box, url_box, model_dd, search_chk, lang_dd, state_history],
        outputs=[code_out, state_history, preview_out, chat_out],
    )

    clear_btn.click(
        lambda: ("", None, "", [], "", "", []),
        outputs=[prompt_box, file_box, url_box, state_history, code_out, preview_out, chat_out],
        queue=False,
    )

# ------------------------------------------------------------------
if __name__ == "__main__":
    demo.queue().launch()