Spaces:
Sleeping
Sleeping
File size: 3,131 Bytes
47fd10e 99287f5 47fd10e c586aa5 47fd10e 0c103d5 47fd10e fd77339 99287f5 49e0436 fd77339 c586aa5 47fd10e 0c103d5 fd77339 47fd10e 7e6cd24 47fd10e c586aa5 47fd10e 7e6cd24 0c103d5 47fd10e 7e6cd24 47fd10e 7e6cd24 0c103d5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 |
import html
import os

import requests
import streamlit as st
from PyPDF2 import PdfReader
# Read the Hugging Face API token from the environment and fail fast if absent.
HF_API_TOKEN = os.getenv("HF_API_KEY")
if not HF_API_TOKEN:
    # Name the exact variable so a misconfigured deployment is easy to fix.
    raise ValueError(
        "Hugging Face API token is not set: define the HF_API_KEY environment variable."
    )

# Hugging Face Inference API endpoint and auth header for the Gemma 2 27B-it model.
GEMMA_27B_API_URL = "https://api-inference.huggingface.co/models/google/gemma-2-27b-it"
HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}
def query_model(api_url, payload):
    """POST *payload* as JSON to *api_url* and return the decoded JSON response.

    Args:
        api_url: full URL of the Hugging Face Inference API endpoint.
        payload: JSON-serializable request body (e.g. {"inputs": prompt}).

    Returns:
        The parsed JSON response (dict or list, as returned by the API).

    Raises:
        ValueError: if the API responds with a non-200 status code.
    """
    # A timeout keeps the Streamlit worker from hanging forever when the
    # endpoint is unresponsive (e.g. while the model cold-starts).
    response = requests.post(api_url, headers=HEADERS, json=payload, timeout=120)
    if response.status_code == 200:
        return response.json()
    # Include the body text: HF returns useful error details (e.g. "model loading").
    raise ValueError(f"Request failed with status code {response.status_code}: {response.text}")
def extract_pdf_text(uploaded_file):
    """Return the concatenated text of every page of the given PDF.

    Args:
        uploaded_file: binary file-like object containing a PDF
            (e.g. the value returned by st.file_uploader).

    Returns:
        All extractable page text joined into a single string.
    """
    pdf_reader = PdfReader(uploaded_file)
    # extract_text() may return None (or "") for pages with no extractable
    # text, such as scanned images; coalesce to "" so joining never raises.
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)
# ---- Streamlit page scaffold ----
st.set_page_config(page_title="Gemma 27B-it Chatbot Interface", layout="wide")
st.title("Gemma 27B-it Chatbot Interface")
st.write("Gemma 27B-it Chatbot Interface")

# Conversation history must survive Streamlit reruns, so keep it in session state.
if "conversation" not in st.session_state:
    st.session_state.conversation = []

# Optional PDF upload: when a file is provided, extract and display its text.
uploaded_file = st.file_uploader("Upload a PDF", type="pdf")
if uploaded_file is not None:
    pdf_text = extract_pdf_text(uploaded_file)
    st.write("### PDF Text Extracted:")
    st.write(pdf_text)
# Question input plus an explicit send button.
question = st.text_input("Ask a question...", "")

if st.button("Send") and question:
    try:
        with st.spinner("Waiting for the model to respond..."):
            response = query_model(GEMMA_27B_API_URL, {"inputs": question})
        # The HF Inference API returns a LIST of generations for
        # text-generation models: [{"generated_text": "..."}]. Calling .get()
        # on that list raised AttributeError (uncaught by `except ValueError`).
        # Accept both the list form and a plain dict.
        if isinstance(response, list) and response:
            answer = response[0].get("generated_text", "No response")
        elif isinstance(response, dict):
            answer = response.get("generated_text", "No response")
        else:
            answer = "No response"
        st.write(f"**Gemma 27B-it:** {answer}")
        st.session_state.conversation.append((question, answer))
    except ValueError as e:
        # query_model wraps HTTP failures in ValueError; show them inline.
        st.error(str(e))
# Styling for the chat bubbles rendered below: user messages align right
# (green), bot messages align left (white).
_CHAT_BUBBLE_CSS = """
    <style>
    .chat-container {
        display: flex;
        flex-direction: column;
        gap: 10px;
        margin-top: 20px;
    }
    .user-message {
        align-self: flex-end;
        background-color: #dcf8c6;
        padding: 10px 14px;
        border-radius: 14px;
        max-width: 80%;
    }
    .bot-message {
        align-self: flex-start;
        background-color: #fff;
        padding: 10px 14px;
        border-radius: 14px;
        max-width: 80%;
    }
    </style>
    """
st.markdown(_CHAT_BUBBLE_CSS, unsafe_allow_html=True)
# Render the conversation history as HTML chat bubbles.
if st.session_state.conversation:
    # st.markdown is the documented API for rendering raw HTML;
    # unsafe_allow_html on st.write is deprecated/unsupported in Streamlit.
    st.markdown('<div class="chat-container">', unsafe_allow_html=True)
    for user_message, bot_message in st.session_state.conversation:
        # Escape interpolated text so user/model output cannot inject markup
        # and break (or hijack) the page's HTML.
        st.markdown(
            f'<div class="user-message">You: {html.escape(user_message)}</div>',
            unsafe_allow_html=True,
        )
        st.markdown(
            f'<div class="bot-message">Gemma 27B-it: {html.escape(bot_message)}</div>',
            unsafe_allow_html=True,
        )
    st.markdown('</div>', unsafe_allow_html=True)