mgbam committed on
Commit
67c2fb1
·
verified ·
1 Parent(s): c928d36

Update app.py

Files changed (1)
  1. app.py +167 -133
app.py CHANGED
@@ -1,182 +1,216 @@
  # app.py
  """
- ShashaCode Builder – AI code-generation playground.

- • Hugging Face Spaces + Gradio front-end
- • Supports prompts, file upload, web-site scraping, optional web search
- • Streams code back, shows live HTML preview, can deploy to a user Space
  """

- # ───────────────────────────────────────── Imports
  import gradio as gr
- from pathlib import Path
- from typing import Dict, List, Optional, Tuple, Any

- from constants import (              # ← all constants live here
      HTML_SYSTEM_PROMPT,
      TRANSFORMERS_JS_SYSTEM_PROMPT,
-     SYSTEM_PROMPTS,
      AVAILABLE_MODELS,
      DEMO_LIST,
-     GRADIO_SUPPORTED_LANGUAGES,      # ← new import
-     SEARCH_START, DIVIDER, REPLACE_END,
  )
-
- from hf_client import get_inference_client
- from tavily_search import enhance_query_with_search
- from utils import (                  # helpers split into utils.py
      history_to_messages,
      history_to_chatbot_messages,
      remove_code_block,
      parse_transformers_js_output,
      format_transformers_js_output,
-     parse_svelte_output,
-     format_svelte_output,
-     apply_search_replace_changes,
-     apply_transformers_js_search_replace_changes,
-     extract_text_from_file,
-     extract_website_content,
-     get_gradio_language,
  )
- from deploy import send_to_sandbox

- # ───────────────────────────────────────── Type Aliases
  History = List[Tuple[str, str]]
- ModelInfo = Dict[str, Any]
-
- # ───────────────────────────────────────── Core Function
- def generate_code(
-     query: str,
-     file_path: Optional[str],
      website_url: Optional[str],
-     model: ModelInfo,
      enable_search: bool,
      language: str,
      history: Optional[History],
  ) -> Tuple[str, History, str, List[Dict[str, str]]]:
-     """Main inference pipeline: build prompt → call model → post-process."""
-     query = query or ""
-     history = history or []
-
-     # 1. pick system prompt
-     if language == "html": system = HTML_SYSTEM_PROMPT
-     elif language == "transformers.js": system = TRANSFORMERS_JS_SYSTEM_PROMPT
-     else: system = SYSTEM_PROMPTS.get(language, HTML_SYSTEM_PROMPT)
-
-     # 2. build message list
-     messages = history_to_messages(history, system)
-
-     ctx_parts = [query.strip()]
-
-     if file_path: ctx_parts += ["[File]", extract_text_from_file(file_path)[:5000]]
-     if website_url:
-         html = extract_website_content(website_url)
-         if not html.startswith("Error"):
-             ctx_parts += ["[Website]", html[:8000]]
-
-     user_query = "\n\n".join(ctx_parts)
-     user_query = enhance_query_with_search(user_query, enable_search)
-     messages.append({"role": "user", "content": user_query})
-
-     # 3. call model
-     client = get_inference_client(model["id"])
-     resp = client.chat.completions.create(
-         model=model["id"],
-         messages=messages,
-         max_tokens=16000,
-         temperature=0.15,
-     )
-     answer = resp.choices[0].message.content

-     # 4. post-process
      if language == "transformers.js":
-         files = parse_transformers_js_output(answer)
-         code = format_transformers_js_output(files)
          preview = send_to_sandbox(files.get("index.html", ""))
      else:
-         clean = remove_code_block(answer)
-         if history and not history[-1][1].startswith("❌"):
-             clean = apply_search_replace_changes(history[-1][1], clean)
-         code = clean
          preview = send_to_sandbox(code) if language == "html" else ""

-     history.append((query, code))
-     chat_msgs = history_to_chatbot_messages(history)
-     return code, history, preview, chat_msgs
-
-
- # ───────────────────────────────────────── UI
- LOGO_PATH = "assets/logo.png"        # ensure this file exists

  CUSTOM_CSS = """
- body {font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;}
- #logo {max-height:64px;margin:auto;}
  """

- with gr.Blocks(css=CUSTOM_CSS, title="ShashaCode Builder") as demo:
-     state_history = gr.State([])
-     state_model = gr.State(AVAILABLE_MODELS[0])

-     # Header
-     with gr.Row():
-         gr.Image(LOGO_PATH, elem_id="logo", show_label=False, height=64)
-         gr.Markdown("## **AnyCoder AI**\nYour AI partner for generating, modifying & understanding code.")

-     # Sidebar (inputs)
      with gr.Row():
-         with gr.Column(scale=1, min_width=300):
-             # Model
-             dd_model = gr.Dropdown(
-                 label="AI Model",
                  choices=[m["name"] for m in AVAILABLE_MODELS],
-                 value=AVAILABLE_MODELS[0]["name"],
              )

-             # Prompt / File / Website tabs
              with gr.Tabs():
-                 with gr.Tab("Prompt"):
-                     tb_prompt = gr.Textbox(label="Describe what you'd like to build…", lines=6)
-                 with gr.Tab("File"):
-                     inp_file = gr.File(label="Reference file", type="filepath")
-                 with gr.Tab("Website"):
-                     tb_url = gr.Textbox(label="URL to redesign")
-
-             # Output config
-             dd_lang = gr.Dropdown(
-                 label="Target language",
-                 choices=[l for l in GRADIO_SUPPORTED_LANGUAGES if l],   # ← fixed list
-                 value="html",
-             )
-             chk_search = gr.Checkbox(label="Enable Tavily Web Search")
-
-             # Buttons
-             btn_generate = gr.Button("Generate Code", variant="primary")
-             btn_clear = gr.Button("Clear Session", variant="secondary")
-
-         # Main panel (outputs)
          with gr.Column(scale=2):
              with gr.Tabs():
-                 with gr.Tab("Code"):
-                     out_code = gr.Code(language="html", show_label=False)
-                 with gr.Tab("Preview"):
-                     out_prev = gr.HTML()
-                 with gr.Tab("History"):
-                     out_hist = gr.Chatbot(type="messages")
-
-     # ─── Callbacks ─────────────────────────────────────────────
-     def _model_from_name(name):
-         return next((m for m in AVAILABLE_MODELS if m["name"] == name), AVAILABLE_MODELS[0])
-
-     dd_model.change(lambda n: _model_from_name(n), inputs=dd_model, outputs=state_model)
-
-     btn_generate.click(
-         fn=generate_code,
-         inputs=[tb_prompt, inp_file, tb_url, state_model, chk_search, dd_lang, state_history],
-         outputs=[out_code, state_history, out_prev, out_hist],
      )

-     btn_clear.click(lambda: ("", None, "", [], "", []),
-                     outputs=[tb_prompt, inp_file, tb_url, state_history, out_code, out_prev])

  if __name__ == "__main__":
      demo.queue().launch()
 
  # app.py
  """
+ Main application file for SHASHA AI, a Gradio-based AI code-generation tool.

+ Provides a UI for generating code in many languages using various AI models.
+ Supports text prompts, file uploads, website scraping, optional web search,
+ and live previews of HTML output.
  """

  import gradio as gr
+ from typing import Optional, Dict, List, Tuple, Any

+ # ─── Local module imports ────────────────────────────────────────────────
+ from constants import (
      HTML_SYSTEM_PROMPT,
      TRANSFORMERS_JS_SYSTEM_PROMPT,
      AVAILABLE_MODELS,
      DEMO_LIST,
  )
+ from hf_client import get_inference_client
+ from tavily_search import enhance_query_with_search
+ from utils import (
+     extract_text_from_file,
+     extract_website_content,
+     apply_search_replace_changes,
      history_to_messages,
      history_to_chatbot_messages,
      remove_code_block,
      parse_transformers_js_output,
      format_transformers_js_output,
  )
+ from deploy import send_to_sandbox

+ # ─── Type aliases ───────────────────────────────────────────────────────
  History = List[Tuple[str, str]]
+ Model = Dict[str, Any]
+
+ # ─── Supported languages (dropdown) ─────────────────────────────────────
+ SUPPORTED_LANGUAGES = [
+     "python", "c", "cpp", "markdown", "latex", "json", "html", "css",
+     "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
+     "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite",
+     "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql",
+     "sql-gpSQL", "sql-sparkSQL", "sql-esper",
+ ]
+
+ def get_model_details(name: str) -> Optional[Model]:
+     for m in AVAILABLE_MODELS:
+         if m["name"] == name:
+             return m
+     return None
+
+ # ─── Core generation function ───────────────────────────────────────────
+ def generation_code(
+     query: Optional[str],
+     file: Optional[str],
      website_url: Optional[str],
+     current_model: Model,
      enable_search: bool,
      language: str,
      history: Optional[History],
  ) -> Tuple[str, History, str, List[Dict[str, str]]]:
+     query = query or ""
+     history = history or []
+     try:
+         # Choose system prompt based on language
+         if language == "html":
+             system_prompt = HTML_SYSTEM_PROMPT
+         elif language == "transformers.js":
+             system_prompt = TRANSFORMERS_JS_SYSTEM_PROMPT
+         else:
+             system_prompt = (
+                 f"You are an expert {language} developer. "
+                 f"Write clean, idiomatic {language} code based on the user's request."
+             )

+         model_id = current_model["id"]
+         # Determine provider
+         if model_id.startswith("openai/") or model_id in {"gpt-4", "gpt-3.5-turbo"}:
+             provider = "openai"
+         elif model_id.startswith("gemini/") or model_id.startswith("google/"):
+             provider = "gemini"
+         elif model_id.startswith("fireworks-ai/"):
+             provider = "fireworks-ai"
+         else:
+             provider = "auto"
+
+         # Build message history
+         msgs = history_to_messages(history, system_prompt)
+         context = query
+         if file:
+             ftext = extract_text_from_file(file)
+             context += f"\n\n[Attached file]\n{ftext[:5000]}"
+         if website_url:
+             wtext = extract_website_content(website_url)
+             if not wtext.startswith("Error"):
+                 context += f"\n\n[Website content]\n{wtext[:8000]}"
+         final_q = enhance_query_with_search(context, enable_search)
+         msgs.append({"role": "user", "content": final_q})
+
+         # Call the model
+         client = get_inference_client(model_id, provider)
+         resp = client.chat.completions.create(
+             model=model_id,
+             messages=msgs,
+             max_tokens=16000,
+             temperature=0.1,
+         )
+         content = resp.choices[0].message.content
+
+     except Exception as e:
+         err = f"❌ **Error:**\n```\n{e}\n```"
+         history.append((query, err))
+         return "", history, "", history_to_chatbot_messages(history)
+
+     # Process model output
      if language == "transformers.js":
+         files = parse_transformers_js_output(content)
+         code = format_transformers_js_output(files)
          preview = send_to_sandbox(files.get("index.html", ""))
      else:
+         cleaned = remove_code_block(content)
+         if history and history[-1][1] and not history[-1][1].startswith("❌"):
+             code = apply_search_replace_changes(history[-1][1], cleaned)
+         else:
+             code = cleaned
          preview = send_to_sandbox(code) if language == "html" else ""

+     new_hist = history + [(query, code)]
+     chat = history_to_chatbot_messages(new_hist)
+     return code, new_hist, preview, chat

+ # ─── Custom CSS (added #logo rule) ───────────────────────────────────────
  CUSTOM_CSS = """
+ body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; }
+ #logo { display:block; margin:20px auto; max-height:80px; }
+ #main_title{ text-align:center; font-size:2.5rem; margin-top:0.5rem; }
+ #subtitle { text-align:center; color:#4a5568; margin-bottom:2.0rem; }
+ .gradio-container { background-color:#f7fafc; }
+ #gen_btn { box-shadow:0 4px 6px rgba(0,0,0,0.1); }
  """

+ # ─── Gradio UI ───────────────────────────────────────────────────────────
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue"),
+                css=CUSTOM_CSS,
+                title="Shasha AI") as demo:

+     history_state = gr.State([])
+     initial_model = AVAILABLE_MODELS[0]
+     model_state = gr.State(initial_model)
+
+     # Logo • Title • Subtitle
+     gr.Image("assets/logo.png", elem_id="logo", show_label=False)
+     gr.Markdown("# 🚀 Shasha AI", elem_id="main_title")
+     gr.Markdown("Your AI partner for generating, modifying, and understanding code.",
+                 elem_id="subtitle")

      with gr.Row():
+         # ── Left column (inputs)
+         with gr.Column(scale=1):
+             gr.Markdown("### 1. Select Model")
+             model_dd = gr.Dropdown(
                  choices=[m["name"] for m in AVAILABLE_MODELS],
+                 value=initial_model["name"],
+                 label="AI Model",
              )

+             gr.Markdown("### 2. Provide Context")
              with gr.Tabs():
+                 with gr.Tab("📝 Prompt"):
+                     prompt_in = gr.Textbox(lines=7,
+                                            placeholder="Describe your request…",
+                                            show_label=False)
+                 with gr.Tab("📄 File"):
+                     file_in = gr.File(type="filepath")
+                 with gr.Tab("🌐 Website"):
+                     url_in = gr.Textbox(placeholder="https://example.com")
+
+             gr.Markdown("### 3. Configure Output")
+             lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES,
+                                   value="html",
+                                   label="Target Language")
+             search_chk = gr.Checkbox(label="Enable Web Search")
+
+             with gr.Row():
+                 clr_btn = gr.Button("Clear Session", variant="secondary")
+                 gen_btn = gr.Button("Generate Code", variant="primary", elem_id="gen_btn")
+
+         # ── Right column (outputs)
          with gr.Column(scale=2):
              with gr.Tabs():
+                 with gr.Tab("💻 Code"):
+                     code_out = gr.Code(language="html", interactive=True)
+                 with gr.Tab("👁️ Live Preview"):
+                     preview_out = gr.HTML()
+                 with gr.Tab("📜 History"):
+                     chat_out = gr.Chatbot(type="messages")
+
+     # ── Callbacks
+     model_dd.change(lambda n: get_model_details(n) or initial_model,
+                     inputs=[model_dd], outputs=[model_state])
+
+     gen_btn.click(
+         fn=generation_code,
+         inputs=[prompt_in, file_in, url_in, model_state, search_chk, lang_dd, history_state],
+         outputs=[code_out, history_state, preview_out, chat_out],
      )

+     clr_btn.click(
+         lambda: ("", None, "", [], "", "", []),
+         outputs=[prompt_in, file_in, url_in, history_state, code_out, preview_out, chat_out],
+         queue=False,
+     )

  if __name__ == "__main__":
      demo.queue().launch()
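
For reference, a minimal way to exercise the new generation_code() signature outside the Gradio UI might look like the sketch below. It is not part of this commit; it assumes the Space's modules (constants, utils, hf_client, tavily_search, deploy) are importable locally and that any required inference or search tokens are set in the environment.

    # smoke_test.py – a sketch, not part of this commit.
    # Assumes the Space's modules import cleanly and API credentials are configured.
    from app import generation_code
    from constants import AVAILABLE_MODELS

    code, history, preview, chat = generation_code(
        query="Create a landing page with a hero section and a contact form.",
        file=None,                          # no reference file uploaded
        website_url=None,                   # no site to redesign
        current_model=AVAILABLE_MODELS[0],  # entries expose "name" and "id" keys
        enable_search=False,                # skip the Tavily web-search step
        language="html",
        history=[],                         # fresh session
    )

    print(code[:300])    # generated HTML, or empty if the error path was taken
    print(bool(preview)) # True when an HTML preview iframe was produced

On the error path the function appends a "❌ **Error:** ..." message to the history and returns empty code and preview, so the same four-tuple shape can be checked either way.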