File size: 1,180 Bytes
199945f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
import streamlit as st
from transformers import pipeline

# Page header: app title plus a short usage blurb shown above the input box.
st.title("Fine-Tuned Model Deployment")
st.markdown(
    """
    ### Welcome to the Fine-Tuned Model Inference App!
    Enter your input text below, and the model will generate a response based on the fine-tuned Mixtral model.
    """
)

# Model loading.
@st.cache_resource
def load_model():
    """Build the text-generation pipeline once per session.

    `st.cache_resource` keeps a single pipeline instance alive across
    Streamlit reruns, so the checkpoint is not re-instantiated on every
    widget interaction.
    """
    task = "text-generation"
    checkpoint = "Partababc/Mixtral-function-call-finetune"
    return pipeline(task, model=checkpoint)

model = load_model()

# --- Generation UI ---
# Prompt input box for the user's query.
user_input = st.text_area("Enter your prompt:", height=150)

# Generate text button: run inference only on non-blank input.
if st.button("Generate Text"):
    if user_input.strip():
        with st.spinner("Generating response..."):
            # Use max_new_tokens instead of max_length: max_length counts
            # the prompt tokens too, so a prompt near or over 150 tokens
            # would leave little or no room for the completion (and recent
            # transformers versions warn/error on that). max_new_tokens
            # bounds only the generated continuation.
            result = model(user_input, max_new_tokens=150, num_return_sequences=1)
            generated_text = result[0]["generated_text"]

            # Display the generated text.
            st.markdown("### Generated Text:")
            st.write(generated_text)
    else:
        st.warning("Please enter some text to generate a response.")

# Footer: horizontal rule followed by an attribution line.
for footer_chunk in ("---", "**Fine-Tuned Model powered by Hugging Face Transformers.**"):
    st.markdown(footer_chunk)