# Scraped Hugging Face Space page residue (kept for provenance, commented
# out so the file parses as Python):
# Partababc's picture
# requirements.txt
# 199945f verified
import streamlit as st
from transformers import pipeline
# Page header: title and a short usage description.
st.title("Fine-Tuned Model Deployment")
# NOTE: the markdown body stays at column 0 — indenting it inside the
# triple-quoted string would render as a markdown code block.
st.markdown(
    """
### Welcome to the Fine-Tuned Model Inference App!
Enter your input text below, and the model will generate a response based on the fine-tuned Mixtral model.
"""
)
# Load the model once per session. st.cache_resource keeps the pipeline
# object in memory across Streamlit reruns, so the (large) checkpoint is
# not reloaded on every widget interaction.
@st.cache_resource
def load_model():
    """Return a cached text-generation pipeline for the fine-tuned checkpoint.

    Returns:
        transformers.Pipeline: a "text-generation" pipeline wrapping
        the ``Partababc/Mixtral-function-call-finetune`` model.
    """
    return pipeline("text-generation", model="Partababc/Mixtral-function-call-finetune")


model = load_model()
# Input box for the user's prompt.
user_input = st.text_area("Enter your prompt:", height=150)

# Run generation only on an explicit button press (Streamlit reruns the
# whole script on every interaction, so this gate avoids generating on
# each keystroke).
if st.button("Generate Text"):
    if user_input.strip():
        with st.spinner("Generating response..."):
            # Use max_new_tokens rather than max_length: max_length counts
            # the prompt's own tokens, so a long prompt would leave few or
            # zero tokens for the actual generation (and can raise a
            # warning/error in recent transformers versions).
            result = model(user_input, max_new_tokens=150, num_return_sequences=1)
            # Pipeline returns a list of dicts; with num_return_sequences=1
            # the single candidate is at index 0.
            generated_text = result[0]["generated_text"]
            # Display the generated text.
            st.markdown("### Generated Text:")
            st.write(generated_text)
    else:
        st.warning("Please enter some text to generate a response.")
# Footer
# Horizontal rule separating the footer from the main content.
st.markdown("---")
st.markdown("**Fine-Tuned Model powered by Hugging Face Transformers.**")