File size: 5,733 Bytes
f7a2a53 8deab94 efa7713 8deab94 e16be07 8deab94 f7a2a53 8deab94 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 |
# adapted from:
# https://medium.com/@james.irving.phd/creating-your-personal-chatbot-using-hugging-face-spaces-and-streamlit-596a54b9e3ed
import os
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig, pipeline
import streamlit as st
# Define the model repository
REPO_NAME = 'schuler/experimental-JP47D20'


@st.cache_resource
def load_model(repo_name):
    """Load tokenizer, generation config, model, and text-generation pipeline.

    Decorated with ``st.cache_resource`` so the (large) model is loaded once
    per server process instead of on every Streamlit script rerun.

    Args:
        repo_name (str): Hugging Face model repository id.

    Returns:
        tuple: (tokenizer, generation_config, model, text_generation_pipeline)
    """
    tokenizer = AutoTokenizer.from_pretrained(repo_name, trust_remote_code=True)
    generator_conf = GenerationConfig.from_pretrained(repo_name)
    model = AutoModelForCausalLM.from_pretrained(repo_name, trust_remote_code=True)
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return tokenizer, generator_conf, model, generator


# Load tokenizer and model (module-level names preserved for downstream code)
tokenizer, generator_conf, model, generator = load_model(REPO_NAME)
# Configure the Streamlit app
st.set_page_config(page_title="Experimental Model - Under Construction", page_icon="π€")
st.title("Experimental Model - Under Construction")
st.markdown(f"*This chat uses the {REPO_NAME} model. Feel free to ask questions such as 'What is biology?' or 'What is the human body?'*")

# Seed st.session_state with defaults on the first run only; reruns keep
# whatever the user has already set.
_session_defaults = {
    "avatars": {'user': None, 'assistant': None},  # per-role chat avatars
    "user_text": None,                             # pending user input
    "max_response_length": 256,                    # generation length cap
    "system_message": "You are a friendly AI conversing with a human user.",
    "starter_message": "Hello, there! How can I help you today?",
}
for _key, _default in _session_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
# Sidebar for settings
with st.sidebar:
    st.header("System Settings")

    # AI behavior settings — text areas write straight back to session state.
    st.session_state.system_message = st.text_area(
        "System Message", value=st.session_state.system_message
    )
    st.session_state.starter_message = st.text_area(
        "First AI Message", value=st.session_state.starter_message
    )

    # Generation length cap.
    st.session_state.max_response_length = st.number_input(
        "Max Response Length", value=st.session_state.max_response_length
    )

    # Avatar pickers, side by side.
    st.markdown("*Select Avatars:*")
    ai_col, user_col = st.columns(2)
    with ai_col:
        st.session_state.avatars['assistant'] = st.selectbox(
            "AI Avatar", options=["π€", "π¬", "π€"], index=0
        )
    with user_col:
        st.session_state.avatars['user'] = st.selectbox(
            "User Avatar", options=["π€", "π±ββοΈ", "π¨πΎ", "π©", "π§πΎ"], index=0
        )

    # Button to wipe the conversation.
    reset_history = st.button("Reset Chat History")

# (Re)initialize the chat history on first run or when the user resets it.
if "chat_history" not in st.session_state or reset_history:
    st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message}]
def get_response(system_message, chat_history, user_text, max_new_tokens=256,
                 text_generator=None, gen_config=None):
    """Generate an assistant reply and append the exchange to the history.

    NOTE: ``system_message`` is currently NOT included in the prompt — the
    original prompt prefix was deliberately disabled. The parameter is kept
    for interface compatibility with existing callers.

    Args:
        system_message (str): System message (accepted but unused; see note).
        chat_history (list): Previous messages as {'role', 'content'} dicts.
            Mutated in place and also returned.
        user_text (str): The user's new input text.
        max_new_tokens (int): Maximum number of new tokens to generate.
        text_generator (callable, optional): Text-generation pipeline; falls
            back to the module-level ``generator``. Injectable for testing.
        gen_config (optional): Generation config; falls back to the
            module-level ``generator_conf``.

    Returns:
        tuple: (assistant_response, chat_history)
    """
    if text_generator is None:
        text_generator = generator
    if gen_config is None:
        gen_config = generator_conf

    # Build the conversation prompt using the model's chat-tag format.
    prompt = ""
    for message in chat_history:
        role = "<|assistant|>" if message['role'] == 'assistant' else "<|user|>"
        prompt += f"\n{role}\n{message['content']}\n"
    prompt += f"\n<|user|>\n{user_text}\n<|assistant|>\n"

    # Generate; the pipeline echoes the prompt followed by the continuation.
    response_output = text_generator(
        prompt,
        generation_config=gen_config,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=0.5,
        repetition_penalty=1.2
    )
    generated_text = response_output[0]['generated_text']

    # Strip the echoed prompt so only the assistant's new text remains.
    assistant_response = generated_text[len(prompt):].strip()

    # Record both sides of the exchange in the (shared) history list.
    chat_history.append({'role': 'user', 'content': user_text})
    chat_history.append({'role': 'assistant', 'content': assistant_response})
    return assistant_response, chat_history
# Chat interface
chat_interface = st.container()
with chat_interface:
    output_container = st.container()

    # Replay the stored conversation; system messages are never rendered.
    with output_container:
        for message in st.session_state.chat_history:
            role = message['role']
            if role == 'system':
                continue
            with st.chat_message(role, avatar=st.session_state.avatars[role]):
                st.markdown(message['content'])

    # User input area (moved to the bottom)
    st.session_state.user_text = st.chat_input(placeholder="Enter your text here.")

    # A new message was submitted on this rerun.
    if st.session_state.user_text:
        # Echo the user's message immediately.
        with st.chat_message("user", avatar=st.session_state.avatars['user']):
            st.markdown(st.session_state.user_text)

        # Show a spinner while the model produces its reply, then render it.
        with st.chat_message("assistant", avatar=st.session_state.avatars['assistant']):
            with st.spinner("Thinking..."):
                response, st.session_state.chat_history = get_response(
                    system_message=st.session_state.system_message,
                    user_text=st.session_state.user_text,
                    chat_history=st.session_state.chat_history,
                    max_new_tokens=st.session_state.max_response_length,
                )
            st.markdown(response)

        # Clear the pending input before the next rerun.
        st.session_state.user_text = None
|