Update app.py
app.py CHANGED
@@ -101,7 +101,7 @@ def execute_pip_command(command, add_message):
             break
         if output:
             # Corrected line: Combine the f-string parts
-            add_message("System", f"
+            add_message("System", f"{output.strip()}")
         time.sleep(0.1)  # Simulate delay for more realistic streaming
     rc = process.poll()
     return rc
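For context, a minimal sketch of how execute_pip_command could stream pip output to the chat log with the corrected single-line f-string. Only the lines visible in the hunk above come from the diff; the subprocess.Popen setup and the surrounding loop structure are assumptions.

```python
import subprocess
import time

def execute_pip_command(command, add_message):
    """Run a pip command and stream its output line by line (sketch)."""
    # Assumed setup: the diff only shows the tail of this function.
    process = subprocess.Popen(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
    while True:
        output = process.stdout.readline()
        if output == "" and process.poll() is not None:
            break
        if output:
            # Corrected line: a single, complete f-string instead of a split literal
            add_message("System", f"{output.strip()}")
        time.sleep(0.1)  # Simulate delay for more realistic streaming
    rc = process.poll()
    return rc

# Example usage with a hypothetical add_message callback:
execute_pip_command("pip --version", lambda role, text: print(f"[{role}] {text}"))
```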
@@ -266,19 +266,16 @@ if CURRENT_APP["code"]:
         inputs = tokenizer(code_area, return_tensors="pt")
         output = model.generate(**inputs, max_length=500, num_return_sequences=1)
         output = tokenizer.decode(output[0], skip_special_tokens=True)
-        st.success(f"Code executed successfully
+        st.success(f"Code executed successfully!{output}")
     except Exception as e:
-        st.error(f"Error executing code: {e}")
+        st.error(f"Error executing code: {e}")
 
 # --- Code Editing ---
 st.markdown("## Edit Your Code:")
 if st.button("Edit Code"):
     try:
         # Use Hugging Face's text-generation pipeline for code editing
-        prompt = f"Improve the following Python code
-
-
-        python\n{code_area}\n
+        prompt = f"Improve the following Python code: python {code_area}"
 
         inputs = tokenizer(prompt, return_tensors="pt")
         output = model.generate(**inputs, max_length=500, num_return_sequences=1)
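As a sketch of the flow this hunk touches: the editor text is wrapped in a single-line prompt, run through the tokenizer and model.generate, decoded, and reported with the combined f-strings. The checkpoint name and the Streamlit wiring outside the shown lines are assumptions, not taken from app.py.

```python
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed checkpoint; the diff does not show which model the Space loads.
MODEL_ID = "Salesforce/codegen-350M-mono"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

code_area = st.text_area("Your code", value="print('hello world')")

st.markdown("## Edit Your Code:")
if st.button("Edit Code"):
    try:
        # Use Hugging Face's text-generation pipeline for code editing
        prompt = f"Improve the following Python code: python {code_area}"
        inputs = tokenizer(prompt, return_tensors="pt")
        output = model.generate(**inputs, max_length=500, num_return_sequences=1)
        output = tokenizer.decode(output[0], skip_special_tokens=True)
        st.success(f"Code edited successfully!\n{output}")
    except Exception as e:
        st.error(f"Error editing code: {e}")
```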
@@ -287,11 +284,11 @@ inputs = tokenizer(prompt, return_tensors="pt")
 
         edited_code = output.split("```python\n")[1].split("\n```")[0]
 
-        st.success(f"Code edited successfully!\n{edited_code}")
+        st.success(f"Code edited successfully!\n{edited_code}")
         update_project_data("code", edited_code)
         code_area.value = edited_code
     except Exception as e:
-        st.error(f"Error editing code: {e}")
+        st.error(f"Error editing code: {e}")
 
 # --- Prebuilt Tools ---
 st.markdown("## Prebuilt Tools:")
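The context line above pulls the edited code out of the model's reply by splitting on a fenced Python code block. A small, self-contained sketch of that extraction; extract_python_block is a hypothetical helper name, and the fallback to the raw reply is an assumption not shown in the diff.

````python
def extract_python_block(output: str) -> str:
    # Mirrors the split in the hunk above: keep only the text between the
    # opening ```python fence and the closing fence.
    marker = "```python\n"
    if marker in output:
        return output.split(marker)[1].split("\n```")[0]
    # Fallback (assumption): return the reply unchanged if no fence is found.
    return output

reply = "Here is an improved version:\n```python\nprint('hi')\n```\nEnjoy!"
print(extract_python_block(reply))  # -> print('hi')
````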
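The change in all three hunks is the one the inline comment names: f-string parts that were split across source lines (an unterminated string literal, which Python rejects at parse time) are combined into a single line. An illustrative before/after with placeholder names rather than the app's own:

```python
output = "  Requirement already satisfied: pip\n"

# Before (shape of the removed lines): the literal is opened on one line and
# continued on the next, which fails with
# "SyntaxError: unterminated string literal".
# message = f"
# {output}"

# After (shape of the added lines): the parts are combined into one f-string.
message = f"{output.strip()}"
print(message)
```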