Update app.py
app.py
CHANGED
@@ -1,35 +1,30 @@
 import os
-import
-from huggingface_hub import login
+import streamlit as st
 from transformers import pipeline
+from dotenv import load_dotenv
 
+load_dotenv()
 # Load the gated model
 model_name = "RickyDeSkywalker/TheoremLlama"
-
-login(HF_TOKEN)
+token = os.environ.get("HF_TOKEN")
 
-generator = pipeline('text-generation', model=model_name, token=
+generator = pipeline('text-generation', model=model_name, token=token)
 
-#
-
-
-return result[0]['generated_text']
+# Streamlit app
+st.title("Lean 4 Code Generation with TheoremLlama")
+st.write("Enter a natural language prompt to generate Lean 4 code.")
 
-#
-
-description = "Enter a natural language prompt to generate Lean 4 code."
+# Text input for user prompt
+prompt = st.text_area("Prompt", value="Prove that the sum of two even numbers is even.")
 
-
-
-
-
-
-
-
-
-
-description=description
-)
+# Generate button
+if st.button("Generate Lean 4 Code"):
+    with st.spinner("Generating Lean 4 code..."):
+        # Generate code based on the prompt
+        result = generator(prompt, max_length=1000, num_return_sequences=1)
+        generated_code = result[0]['generated_text']
+
+        # Display the generated code
+        st.code(generated_code, language='lean')
 
-
-interface.launch()
+st.markdown("Developed by [Your Name]")
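Because Streamlit re-executes app.py on every interaction, the version above rebuilds the text-generation pipeline on each rerun. A minimal sketch of one way to avoid that, assuming the same model_name and HF_TOKEN environment variable as in the diff (the load_generator helper is hypothetical and not part of this commit):

import os

import streamlit as st
from transformers import pipeline

model_name = "RickyDeSkywalker/TheoremLlama"

@st.cache_resource
def load_generator():
    # Cached by Streamlit, so the pipeline is built once per process
    # rather than on every rerun of the script.
    return pipeline('text-generation', model=model_name, token=os.environ.get("HF_TOKEN"))

generator = load_generator()

Locally, load_dotenv() can fill HF_TOKEN from a .env file; on a Hugging Face Space the same variable can come from a repository secret, in which case load_dotenv() simply finds no .env file and does nothing.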