acecalisto3 committed
Commit 835d56a · verified · 1 Parent(s): b62e04c

Update app.py

Files changed (1):
  app.py (+4 -32)
app.py CHANGED
@@ -12,6 +12,10 @@ from llama_cpp import Llama, LlamaCppPythonProvider, LlamaCppAgent
 from llama_cpp.llama_cpp_agent import get_messages_formatter_type, get_context_by_model
 from io import StringIO
 import tempfile
+import importlib
+import time
+import sys
+from flask import Flask
 
 # --- Global Variables ---
 CURRENT_PROJECT = {} # Store project data (code, packages, etc.)
@@ -105,19 +109,16 @@ def execute_pip_command(command, add_message):
         time.sleep(0.1) # Simulate delay for more realistic streaming
     rc = process.poll()
     return rc
-
 def generate_text(input_text):
     """Generates text using the loaded language model."""
     inputs = tokenizer(input_text, return_tensors="pt")
     output = model.generate(**inputs, max_length=500, num_return_sequences=1)
     return tokenizer.decode(output[0], skip_special_tokens=True)
-
 # --- AI Agent Functions ---
 def analyze_user_intent(user_input):
     """Classifies the user's intent based on their input."""
     classification = classifier(user_input)
     return classification[0]['label']
-
 def generate_mini_app_ideas(theme):
     """Generates mini-app ideas based on the user's theme."""
     if theme.lower() == "productivity":
@@ -140,14 +141,12 @@ def generate_mini_app_ideas(theme):
         ]
     else:
         return ["No matching mini-apps found. Try a different theme."]
-
 def generate_app_code(app_name, app_description, model_name, history):
     """Generates code for the selected mini-app using the specified GGUF model."""
     prompt = f"Write a Python script for a {app_description} named {app_name} using Gradio and Streamlit:"
     agent = get_agent(model_name)
     generated_code = agent.chat(prompt, history)
     return generated_code
-
 def execute_terminal_command(command):
     """Executes a terminal command and returns the output."""
     try:
@@ -155,7 +154,6 @@
         return result.strip(), None
     except subprocess.CalledProcessError as e:
         return e.output.strip(), str(e)
-
 def install_package(package_name):
     """Installs a package using pip."""
     output, error = execute_terminal_command(f"pip install {package_name}")
@@ -163,22 +161,17 @@
         return f"Error installing package: {error}"
     else:
         return f"Package `{package_name}` installed successfully."
-
 def get_project_data():
     """Returns the current project data."""
     return CURRENT_PROJECT
-
 def update_project_data(key, value):
     """Updates the project data."""
     CURRENT_PROJECT[key] = value
-
 def handle_chat(input_text, history):
     """Handles user input in the chat interface."""
     def add_message(sender, message):
         history.append((sender, message))
-
     add_message("User", input_text)
-
     if input_text.startswith("pip install ") or input_text.startswith("https://pypi.org/project/"):
         package_name = extract_package_name(input_text)
         add_message("System", f"Installing `{package_name}`...")
@@ -186,7 +179,6 @@ def handle_chat(input_text, history):
         add_message("System", result)
         update_project_data("packages", CURRENT_PROJECT.get("packages", []) + [package_name])
         return history, dynamic_functions
-
     # --- AI Agent Interaction ---
     if USER_INTENT is None:
         add_message("System", analyze_user_intent(input_text))
@@ -203,10 +195,7 @@ def handle_chat(input_text, history):
             add_message("System", f"Generating code for {app_description}...")
             code = generate_app_code(selected_app, app_description, "CodeQwen", history) # Use CodeQwen by default
             add_message("System", f"```python\n"
-
-
                                   f"{code}\n```")
-
             add_message("System", "Code generated! What else can I do for you?")
             update_project_data("code", code)
             update_project_data("app_name", selected_app)
@@ -215,29 +204,23 @@ add_message("System", "Code generated! What else can I do for you?")
             add_message("System", "Please choose from the provided mini-app ideas.")
     else:
         add_message("System", "You already have an app in progress. Do you want to start over?")
-
     return history, dynamic_functions
-
 # --- Prebuilt Tools ---
 def generate_code_tool(input_text, history):
     """Prebuilt tool for code generation."""
     code = generate_app_code("MyTool", "A tool to do something", "CodeQwen", history) # Use CodeQwen by default
     return f"```python {code}```"
-
 def analyze_code_tool(input_text, history):
     """Prebuilt tool for code analysis."""
     agent = get_agent("Codestral")
     analysis = agent.chat(input_text, history)
     return analysis
-
 # --- Streamlit Interface ---
 st.title("AI4ME: Your Personal AI App Workshop")
 st.markdown("## Let's build your dream app together! 🤖")
-
 # --- Hugging Face Token Input ---
 huggingface_token = st.text_input("Enter your Hugging Face Token", type="password", key="huggingface_token")
 os.environ["huggingface_token"] = huggingface_token
-
 # --- Chat Interface ---
 chat_history = []
 chat_input = st.text_input("Tell me your idea...", key="chat_input")
@@ -245,16 +228,13 @@ if chat_input:
     chat_history, dynamic_functions = handle_chat(chat_input, chat_history)
     for sender, message in chat_history:
         st.markdown(f"**{sender}:** {message}")
-
 # --- Code Execution and Deployment ---
 if CURRENT_APP["code"]:
     st.markdown("## Your App Code:")
     code_area = st.text_area("Your App Code", value=CURRENT_APP["code"], key="code_area")
-
     st.markdown("## Deploy Your App (Coming Soon!)")
     # Add deployment functionality here using Streamlit's deployment features.
     # For example, you could use Streamlit's `st.button` to trigger deployment.
-
     # --- Code Execution ---
     st.markdown("## Run Your App:")
     if st.button("Execute Code"):
@@ -266,27 +246,21 @@ if CURRENT_APP["code"]:
             st.success(f"Code executed successfully!{output}")
         except Exception as e:
             st.error(f"Error executing code: {e}")
-
     # --- Code Editing ---
     st.markdown("## Edit Your Code:")
     if st.button("Edit Code"):
         try:
             # Use Hugging Face's text-generation pipeline for code editing
             prompt = f"Improve the following Python code: ```python {code_area}```"
-
             inputs = tokenizer(prompt, return_tensors="pt")
             output = model.generate(**inputs, max_length=500, num_return_sequences=1)
             edited_code = tokenizer.decode(output[0], skip_special_tokens=True).split("```"
-
-
                 "python\n")[1].split("\n```")[0]
-
             st.success(f"Code edited successfully!\n{edited_code}")
             update_project_data("code", edited_code)
             code_area.value = edited_code
         except Exception as e:
             st.error(f"Error editing code: {e}")
-
 # --- Prebuilt Tools ---
 st.markdown("## Prebuilt Tools:")
 with st.expander("Generate Code"):
@@ -294,13 +268,11 @@ with st.expander("Generate Code"):
     if st.button("Generate"):
         code_output = generate_code_tool(code_input, chat_history)
         st.markdown(code_output)
-
 with st.expander("Analyze Code"):
     code_input = st.text_area("Enter your code:", key="analyze_code_input")
     if st.button("Analyze"):
         analysis_output = analyze_code_tool(code_input, chat_history)
         st.markdown(analysis_output)
-
 # --- Additional Features ---
 # Add features like:
 # - Code editing
 