mgbam committed on
Commit
c928d36
·
verified ·
1 Parent(s): 588ca16

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +129 -148
app.py CHANGED
@@ -1,201 +1,182 @@
1
  # app.py
2
-
3
  """
4
- Main application file for SHASHA AI, a Gradio-based AI code generation tool.
5
 
6
- Provides a UI for generating code in many languages using various AI models.
7
- Supports text prompts, file uploads, website scraping, optional web search,
8
- and live previews of HTML output.
9
  """
10
 
 
11
  import gradio as gr
12
- from typing import Optional, Dict, List, Tuple, Any
 
13
 
14
- # --- Local module imports ---
15
- from constants import (
16
  HTML_SYSTEM_PROMPT,
17
  TRANSFORMERS_JS_SYSTEM_PROMPT,
 
18
  AVAILABLE_MODELS,
19
  DEMO_LIST,
 
 
20
  )
21
- from hf_client import get_inference_client
22
- from tavily_search import enhance_query_with_search
23
- from utils import (
24
- extract_text_from_file,
25
- extract_website_content,
26
- apply_search_replace_changes,
27
  history_to_messages,
28
  history_to_chatbot_messages,
29
  remove_code_block,
30
  parse_transformers_js_output,
31
  format_transformers_js_output,
 
 
 
 
 
 
 
32
  )
33
- from deploy import send_to_sandbox
34
 
35
# --- Type aliases ---
# A chat session is a list of (user_prompt, generated_code) pairs.
History = List[Tuple[str, str]]
# A model descriptor dict — e.g. one entry of AVAILABLE_MODELS.
Model = Dict[str, Any]

# --- Supported languages for dropdown ---
# Offered as choices in the "Target Language" dropdown of the UI.
SUPPORTED_LANGUAGES = [
    "python", "c", "cpp", "markdown", "latex", "json", "html", "css",
    "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
    "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite",
    "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql",
    "sql-gpSQL", "sql-sparkSQL", "sql-esper"
]
47
-
48
def get_model_details(name: str) -> Optional[Model]:
    """Resolve a model display name to its AVAILABLE_MODELS entry.

    Returns the matching descriptor dict, or None when no model
    carries that name.
    """
    return next((entry for entry in AVAILABLE_MODELS if entry["name"] == name), None)
53
-
54
def generation_code(
    query: Optional[str],
    file: Optional[str],
    website_url: Optional[str],
    current_model: Model,
    enable_search: bool,
    language: str,
    history: Optional[History],
) -> Tuple[str, History, str, List[Dict[str, str]]]:
    """Run one generation turn and return
    (code, updated history, HTML preview, chatbot-formatted messages).

    Any failure while building the prompt or calling the model is caught
    and surfaced as a "❌"-prefixed chat entry instead of raising.
    """
    query = query or ""
    history = history or []
    try:
        # System prompt: dedicated prompts for html / transformers.js,
        # a generic expert-developer prompt for every other language.
        if language == "html":
            sys_prompt = HTML_SYSTEM_PROMPT
        elif language == "transformers.js":
            sys_prompt = TRANSFORMERS_JS_SYSTEM_PROMPT
        else:
            sys_prompt = (
                f"You are an expert {language} developer. "
                f"Write clean, idiomatic {language} code based on the user's request."
            )

        mid = current_model["id"]
        # Map the model id onto an inference provider name.
        if mid.startswith("openai/") or mid in {"gpt-4", "gpt-3.5-turbo"}:
            backend = "openai"
        elif mid.startswith(("gemini/", "google/")):
            backend = "gemini"
        elif mid.startswith("fireworks-ai/"):
            backend = "fireworks-ai"
        else:
            backend = "auto"

        # Assemble the conversation plus the new user turn (prompt text,
        # optional truncated file contents, optional scraped website).
        msgs = history_to_messages(history, sys_prompt)
        user_ctx = query
        if file:
            user_ctx += f"\n\n[Attached file]\n{extract_text_from_file(file)[:5000]}"
        if website_url:
            site = extract_website_content(website_url)
            if not site.startswith("Error"):
                user_ctx += f"\n\n[Website content]\n{site[:8000]}"
        msgs.append(
            {"role": "user", "content": enhance_query_with_search(user_ctx, enable_search)}
        )

        # Invoke the model.
        reply = get_inference_client(mid, backend).chat.completions.create(
            model=mid,
            messages=msgs,
            max_tokens=16000,
            temperature=0.1
        )
        content = reply.choices[0].message.content

    except Exception as exc:
        notice = f"❌ **Error:**\n```\n{exc}\n```"
        history.append((query, notice))
        return "", history, "", history_to_chatbot_messages(history)

    # Post-process the raw model answer into displayable code + preview.
    if language == "transformers.js":
        bundle = parse_transformers_js_output(content)
        code = format_transformers_js_output(bundle)
        preview = send_to_sandbox(bundle.get("index.html", ""))
    else:
        cleaned = remove_code_block(content)
        prev = history[-1][1] if history else ""
        if prev and not prev.startswith("❌"):
            # Follow-up turn: merge search/replace edits into the prior code.
            code = apply_search_replace_changes(prev, cleaned)
        else:
            code = cleaned
        preview = send_to_sandbox(code) if language == "html" else ""

    updated = history + [(query, code)]
    return code, updated, preview, history_to_chatbot_messages(updated)
 
 
 
 
133
 
134
# --- Custom CSS ---
# Styles referenced by elem_id below (main_title, subtitle, gen_btn).
CUSTOM_CSS = """
body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; }
#main_title { text-align: center; font-size: 2.5rem; margin-top: 1.5rem; }
#subtitle { text-align: center; color: #4a5568; margin-bottom: 2.5rem; }
.gradio-container { background-color: #f7fafc; }
#gen_btn { box-shadow: 0 4px 6px rgba(0,0,0,0.1); }
"""

with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue"), css=CUSTOM_CSS, title="Shasha AI") as demo:
    # Per-session state: chat history and the selected model descriptor.
    history_state = gr.State([])
    initial_model = AVAILABLE_MODELS[0]
    model_state = gr.State(initial_model)

    gr.Markdown("# 🚀 Shasha AI", elem_id="main_title")
    gr.Markdown("Your AI partner for generating, modifying, and understanding code.", elem_id="subtitle")

    with gr.Row():
        # Left column: all inputs (model, context, output config, actions).
        with gr.Column(scale=1):
            gr.Markdown("### 1. Select Model")
            model_dd = gr.Dropdown(
                choices=[m["name"] for m in AVAILABLE_MODELS],
                value=initial_model["name"],
                label="AI Model"
            )

            gr.Markdown("### 2. Provide Context")
            with gr.Tabs():
                with gr.Tab("📝 Prompt"):
                    prompt_in = gr.Textbox(lines=7, placeholder="Describe your request...", show_label=False)
                with gr.Tab("📄 File"):
                    file_in = gr.File(type="filepath")
                with gr.Tab("🌐 Website"):
                    url_in = gr.Textbox(placeholder="https://example.com")

            gr.Markdown("### 3. Configure Output")
            lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES, value="html", label="Target Language")
            search_chk = gr.Checkbox(label="Enable Web Search")

            with gr.Row():
                clr_btn = gr.Button("Clear Session", variant="secondary")
                gen_btn = gr.Button("Generate Code", variant="primary", elem_id="gen_btn")

        # Right column: generated code, live HTML preview, chat history.
        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("💻 Code"):
                    code_out = gr.Code(language="html", interactive=True)
                with gr.Tab("👁️ Live Preview"):
                    preview_out = gr.HTML()
                with gr.Tab("📜 History"):
                    chat_out = gr.Chatbot(type="messages")

    # Keep model_state in sync with the dropdown; fall back to the
    # initial model when the name cannot be resolved.
    model_dd.change(lambda n: get_model_details(n) or initial_model, inputs=[model_dd], outputs=[model_state])

    gen_btn.click(
        fn=generation_code,
        inputs=[prompt_in, file_in, url_in, model_state, search_chk, lang_dd, history_state],
        outputs=[code_out, history_state, preview_out, chat_out],
    )

    # Reset every input/output component; queue=False makes it instant.
    clr_btn.click(
        lambda: ("", None, "", [], "", "", []),
        outputs=[prompt_in, file_in, url_in, history_state, code_out, preview_out, chat_out],
        queue=False,
    )

# Script entry point: enable request queuing and launch the server.
if __name__ == "__main__":
    demo.queue().launch()
 
1
  # app.py
 
2
  """
3
+ ShashaCode Builder – AI codegeneration playground.
4
 
5
+ Hugging Face Spaces + Gradio front‑end
6
+ Supports prompts, file upload, web‑site scraping, optional web search
7
+ Streams code back, shows live HTML preview, can deploy to a user Space
8
  """
9
 
10
+ # ───────────────────────────────────────── Imports
11
  import gradio as gr
12
+ from pathlib import Path
13
+ from typing import Dict, List, Optional, Tuple, Any
14
 
15
+ from constants import ( # all constants live here
 
16
  HTML_SYSTEM_PROMPT,
17
  TRANSFORMERS_JS_SYSTEM_PROMPT,
18
+ SYSTEM_PROMPTS,
19
  AVAILABLE_MODELS,
20
  DEMO_LIST,
21
+ GRADIO_SUPPORTED_LANGUAGES, # ← new import
22
+ SEARCH_START, DIVIDER, REPLACE_END,
23
  )
24
+
25
+ from hf_client import get_inference_client
26
+ from tavily_search import enhance_query_with_search
27
+ from utils import ( # helpers split into utils.py
 
 
28
  history_to_messages,
29
  history_to_chatbot_messages,
30
  remove_code_block,
31
  parse_transformers_js_output,
32
  format_transformers_js_output,
33
+ parse_svelte_output,
34
+ format_svelte_output,
35
+ apply_search_replace_changes,
36
+ apply_transformers_js_search_replace_changes,
37
+ extract_text_from_file,
38
+ extract_website_content,
39
+ get_gradio_language,
40
  )
41
+ from deploy import send_to_sandbox
42
 
43
+ # ───────────────────────────────────────── Type Aliases
44
  History = List[Tuple[str, str]]
45
+ ModelInfo = Dict[str, Any]
46
+
47
+ # ───────────────────────────────────────── Core Function
48
def generate_code(
    query: str,
    file_path: Optional[str],
    website_url: Optional[str],
    model: ModelInfo,
    enable_search: bool,
    language: str,
    history: Optional[History],
) -> Tuple[str, History, str, List[Dict[str, str]]]:
    """Main inference pipeline: build prompt → call model → post-process.

    Returns (code, updated history, HTML preview, chatbot-formatted messages).
    Inference failures are reported as a "❌"-prefixed chat entry instead of
    raising, so the Gradio callback never crashes the UI.
    """
    query = query or ""
    history = history or []

    # 1. pick system prompt (dedicated for html / transformers.js,
    #    per-language table with an HTML fallback otherwise)
    if language == "html":
        system = HTML_SYSTEM_PROMPT
    elif language == "transformers.js":
        system = TRANSFORMERS_JS_SYSTEM_PROMPT
    else:
        system = SYSTEM_PROMPTS.get(language, HTML_SYSTEM_PROMPT)

    # 2. build message list: prior turns + new user context
    messages = history_to_messages(history, system)

    ctx_parts = [query.strip()]
    if file_path:
        ctx_parts += ["[File]", extract_text_from_file(file_path)[:5000]]
    if website_url:
        html = extract_website_content(website_url)
        if not html.startswith("Error"):
            ctx_parts += ["[Website]", html[:8000]]

    user_query = "\n\n".join(ctx_parts)
    user_query = enhance_query_with_search(user_query, enable_search)
    messages.append({"role": "user", "content": user_query})

    # 3. call model.
    # FIX: the try/except was lost in a refactor — without it any network or
    # provider error crashed the callback, even though the "❌" guard in
    # step 4 still expected error entries to exist in the history.
    try:
        client = get_inference_client(model["id"])
        resp = client.chat.completions.create(
            model=model["id"],
            messages=messages,
            max_tokens=16000,
            temperature=0.15,
        )
        answer = resp.choices[0].message.content
    except Exception as exc:
        err = f"❌ **Error:**\n```\n{exc}\n```"
        new_history = history + [(query, err)]
        return "", new_history, "", history_to_chatbot_messages(new_history)

    # 4. post-process the raw answer into code + optional live preview
    if language == "transformers.js":
        files = parse_transformers_js_output(answer)
        code = format_transformers_js_output(files)
        preview = send_to_sandbox(files.get("index.html", ""))
    else:
        clean = remove_code_block(answer)
        # Follow-up turns arrive as search/replace edits; apply them to the
        # previous result unless it is empty or an error message.
        if history and history[-1][1] and not history[-1][1].startswith("❌"):
            clean = apply_search_replace_changes(history[-1][1], clean)
        code = clean
        preview = send_to_sandbox(code) if language == "html" else ""

    # Return a fresh list rather than mutating the gr.State list in place,
    # matching the error path above.
    new_history = history + [(query, code)]
    return code, new_history, preview, history_to_chatbot_messages(new_history)
106
+
107
+
108
# ───────────────────────────────────────── UI
# Header logo loaded by the gr.Image component below.
LOGO_PATH = "assets/logo.png"  # ensure this file exists

# Styles for the page body and the #logo element.
CUSTOM_CSS = """
body {font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;}
#logo {max-height:64px;margin:auto;}
"""
115
 
116
with gr.Blocks(css=CUSTOM_CSS, title="ShashaCode Builder") as demo:
    # Per-session state: chat history and the selected model descriptor.
    state_history = gr.State([])
    state_model = gr.State(AVAILABLE_MODELS[0])

    # Header
    with gr.Row():
        gr.Image(LOGO_PATH, elem_id="logo", show_label=False, height=64)
        gr.Markdown("## **AnyCoder AI**\nYour AI partner for generating, modifying & understanding code.")

    # Sidebar (inputs)
    with gr.Row():
        with gr.Column(scale=1, min_width=300):
            # Model
            dd_model = gr.Dropdown(
                label="AI Model",
                choices=[m["name"] for m in AVAILABLE_MODELS],
                value=AVAILABLE_MODELS[0]["name"],
            )

            # Prompt / File / Website tabs
            with gr.Tabs():
                with gr.Tab("Prompt"):
                    tb_prompt = gr.Textbox(label="Describe what you'd like to build…", lines=6)
                with gr.Tab("File"):
                    inp_file = gr.File(label="Reference file", type="filepath")
                with gr.Tab("Website"):
                    tb_url = gr.Textbox(label="URL to redesign")

            # Output config
            dd_lang = gr.Dropdown(
                label="Target language",
                choices=[l for l in GRADIO_SUPPORTED_LANGUAGES if l],  # drop falsy entries
                value="html",
            )
            chk_search = gr.Checkbox(label="Enable Tavily Web Search")

            # Buttons
            btn_generate = gr.Button("Generate Code", variant="primary")
            btn_clear = gr.Button("Clear Session", variant="secondary")

        # Main panel (outputs)
        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("Code"):
                    out_code = gr.Code(language="html", show_label=False)
                with gr.Tab("Preview"):
                    out_prev = gr.HTML()
                with gr.Tab("History"):
                    out_hist = gr.Chatbot(type="messages")

    # ─── Callbacks ─────────────────────────────────────────────
    def _model_from_name(name):
        """Resolve a dropdown display name to its AVAILABLE_MODELS entry."""
        return next((m for m in AVAILABLE_MODELS if m["name"] == name), AVAILABLE_MODELS[0])

    # Pass the resolver directly — no need for a wrapping lambda.
    dd_model.change(_model_from_name, inputs=dd_model, outputs=state_model)

    btn_generate.click(
        fn=generate_code,
        inputs=[tb_prompt, inp_file, tb_url, state_model, chk_search, dd_lang, state_history],
        outputs=[out_code, state_history, out_prev, out_hist],
    )

    # FIX: also reset the History chatbot — out_hist was missing from the
    # outputs, so "Clear Session" left stale messages on screen.
    # queue=False makes the clear instantaneous.
    btn_clear.click(
        lambda: ("", None, "", [], "", "", []),
        outputs=[tb_prompt, inp_file, tb_url, state_history, out_code, out_prev, out_hist],
        queue=False,
    )
 
 
 
180
 
181
# Script entry point: enable request queuing and launch the Gradio server.
if __name__ == "__main__":
    demo.queue().launch()