hertogateis committed on
Commit
004ce70
·
verified ·
1 Parent(s): e0fae88

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -54
app.py CHANGED
@@ -1,10 +1,9 @@
1
  import streamlit as st
2
  from transformers import T5ForConditionalGeneration, T5Tokenizer
3
  import torch
4
- import random
5
 
6
  # Load pre-trained T5 model and tokenizer
7
- model_name = "t5-small" # You can use "t5-base" or "t5-large" for better quality but slower response
8
  model = T5ForConditionalGeneration.from_pretrained(model_name)
9
  tokenizer = T5Tokenizer.from_pretrained(model_name)
10
 
@@ -12,64 +11,25 @@ tokenizer = T5Tokenizer.from_pretrained(model_name)
12
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
13
  model.to(device)
14
 
15
- # Initialize chat history and conversation context
16
- if 'history' not in st.session_state:
17
- st.session_state['history'] = []
18
- if 'conversation' not in st.session_state:
19
- st.session_state['conversation'] = []
20
-
21
- # Define multiple system prompts to control bot's behavior
22
- system_prompts = [
23
- "You are a fun, casual chatbot. Keep the conversation light-hearted and interesting.",
24
- "You are a friendly assistant. Respond in a polite, friendly, and informative manner.",
25
- "You are an informative assistant. Respond clearly and concisely to any questions asked.",
26
- "You are a compassionate listener, always responding with kindness and empathy."
27
- ]
28
-
29
- # Select a random system prompt to start the conversation
30
- def get_system_prompt():
31
- return random.choice(system_prompts)
32
 
33
  def generate_response(input_text):
34
- # If it's the first interaction, add the system prompt to the conversation history
35
- if len(st.session_state['history']) == 0:
36
- system_prompt = get_system_prompt()
37
- st.session_state['conversation'].append(f"System: {system_prompt}")
38
- system_input = f"conversation: {system_prompt} "
39
- st.session_state['history'].append(system_input)
40
-
41
- # Prepare the user input by appending it to the history
42
- user_input = f"conversation: {input_text} "
43
-
44
- # Concatenate history (only user input after system prompt)
45
- full_input = "".join(st.session_state['history']) + user_input
46
-
47
- # Tokenize input text and generate response from the model
48
- input_ids = tokenizer.encode(full_input, return_tensors="pt").to(device)
49
- outputs = model.generate(input_ids, max_length=1000, num_beams=5, top_p=0.95, temperature=0.7, pad_token_id=tokenizer.eos_token_id)
50
-
51
- # Decode the model's output
52
  bot_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
53
 
54
- # Update the history with the new user input and the model's output
55
- st.session_state['history'].append(user_input)
56
- st.session_state['history'].append(f"bot: {bot_output} ")
57
-
58
- # Add both user input and bot response to the conversation history for display
59
- st.session_state['conversation'].append(f"You: {input_text}")
60
- st.session_state['conversation'].append(f"Bot: {bot_output}")
61
-
62
  return bot_output
63
 
64
- # Streamlit Interface
65
- st.title("Chat with T5")
66
-
67
- # Display the conversation history
68
- if st.session_state['conversation']:
69
- for message in st.session_state['conversation']:
70
- st.markdown(f"<p style='color:gray; padding:5px;'>{message}</p>", unsafe_allow_html=True)
71
-
72
- # Create input box for user
73
  user_input = st.text_input("You: ", "")
74
 
75
  if user_input:
 
1
  import streamlit as st
2
  from transformers import T5ForConditionalGeneration, T5Tokenizer
3
  import torch
 
4
 
5
  # Load pre-trained T5 model and tokenizer
6
+ model_name = "t5-small" # Use t5-small for faster responses
7
  model = T5ForConditionalGeneration.from_pretrained(model_name)
8
  tokenizer = T5Tokenizer.from_pretrained(model_name)
9
 
 
11
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
12
  model.to(device)
13
 
14
+ # Streamlit Interface
15
+ st.title("Simple Chatbot with T5")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
def generate_response(input_text):
    """Generate a single chatbot reply for one user message.

    The message is prefixed with "conversation: " because T5 is a
    text-to-text model that expects a task prefix on its input.

    Args:
        input_text: The user's message as a plain string.

    Returns:
        The model's decoded reply with special tokens stripped.
    """
    # Prefix the raw message so T5 treats it as a conversational task.
    prompt = f"conversation: {input_text}"

    # Tokenize and move the input ids onto the same device as the model.
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)

    # Deterministic beam-search decoding. NOTE(review): the original call
    # also passed top_p=0.95 and temperature=0.7, but those sampling knobs
    # are silently ignored unless do_sample=True, so they are dropped here;
    # the generated output is unchanged.
    outputs = model.generate(
        input_ids,
        max_length=100,
        num_beams=5,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode the best beam back into a readable string.
    bot_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return bot_output
31
 
32
+ # Create input box for user to type a message
 
 
 
 
 
 
 
 
33
  user_input = st.text_input("You: ", "")
34
 
35
  if user_input: