Partababc committed on
Commit 199945f · verified · 1 Parent(s): 55f0a29

requirements.txt

streamlit
transformers
torch
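
These three dependencies are all the app needs; on a fresh environment they can be installed with the usual `pip install -r requirements.txt` and the app launched with `streamlit run app.py` (standard pip and Streamlit CLI commands, not part of this commit).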

Files changed (1)
  1. app.py +39 -0
app.py ADDED
@@ -0,0 +1,39 @@
+ import streamlit as st
+ from transformers import pipeline
+
+ # Title and description
+ st.title("Fine-Tuned Model Deployment")
+ st.markdown(
+     """
+ ### Welcome to the Fine-Tuned Model Inference App!
+ Enter your input text below, and the model will generate a response based on the fine-tuned model.
+     """
+ )
+
+ # Load the model
+ @st.cache_resource
+ def load_model():
+     return pipeline("text-generation", model="Partababc/Mixtral-function-call-finetune")
+
+ model = load_model()
+
+ # Input box for user query
+ user_input = st.text_area("Enter your prompt:", height=150)
+
+ # Generate text button
+ if st.button("Generate Text"):
+     if user_input.strip():
+         with st.spinner("Generating response..."):
+             # Generate response
+             result = model(user_input, max_length=150, num_return_sequences=1)
+             generated_text = result[0]["generated_text"]
+
+             # Display the generated text
+             st.markdown("### Generated Text:")
+             st.write(generated_text)
+     else:
+         st.warning("Please enter some text to generate a response.")
+
+ # Footer
+ st.markdown("---")
+ st.markdown("**Fine-Tuned Model powered by Hugging Face Transformers.**")
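
For reference, a minimal sketch of exercising the same pipeline outside Streamlit, assuming the Partababc/Mixtral-function-call-finetune checkpoint loads on the available hardware; the prompt and the max_new_tokens value are illustrative and not taken from this commit:

    from transformers import pipeline

    # Same text-generation pipeline that app.py builds inside load_model()
    generator = pipeline("text-generation", model="Partababc/Mixtral-function-call-finetune")

    # Hypothetical prompt; app.py instead passes the text_area input with max_length=150
    outputs = generator("What is the weather in Paris today?", max_new_tokens=64, num_return_sequences=1)
    print(outputs[0]["generated_text"])

Wrapping the pipeline construction in @st.cache_resource, as app.py does, means the model is loaded once per Streamlit process and reused across reruns instead of being reloaded on every interaction.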