johntheajs committed on
Commit
4b93ec6
·
verified ·
1 Parent(s): b954f7c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -6
app.py CHANGED
@@ -3,9 +3,8 @@ import torch
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
  # Load the model and tokenizer
6
- model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
7
  tokenizer = AutoTokenizer.from_pretrained(model_id)
8
-
9
  model = AutoModelForCausalLM.from_pretrained(model_id)
10
 
11
  # Function to generate responses based on user messages
@@ -16,11 +15,9 @@ def generate_response(messages):
16
  return generated_response
17
 
18
  # Streamlit app
19
- st.title("Mixtral Chatbot")
20
-
21
  messages = []
22
  user_input = st.text_input("You:", "")
23
-
24
  if st.button("Send"):
25
  if user_input:
26
  messages.append({"role": "user", "content": user_input})
@@ -34,4 +31,4 @@ for message in messages:
34
  if message["role"] == "user":
35
  st.text_input("You:", value=message["content"], disabled=True)
36
  elif message["role"] == "assistant":
37
- st.text_area("Mixtral:", value=message["content"], disabled=True)
 
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
  # Load the model and tokenizer
6
+ model_id = "google/gemma-7b" # Replace with "google/gemma-7b-it" for instruction tuning
7
  tokenizer = AutoTokenizer.from_pretrained(model_id)
 
8
  model = AutoModelForCausalLM.from_pretrained(model_id)
9
 
10
  # Function to generate responses based on user messages
 
15
  return generated_response
16
 
17
  # Streamlit app
18
+ st.title("Gemma Chatbot")
 
19
  messages = []
20
  user_input = st.text_input("You:", "")
 
21
  if st.button("Send"):
22
  if user_input:
23
  messages.append({"role": "user", "content": user_input})
 
31
  if message["role"] == "user":
32
  st.text_input("You:", value=message["content"], disabled=True)
33
  elif message["role"] == "assistant":
34
+ st.text_area("Gemma:", value=message["content"], disabled=True)