schuler committed (verified)
Commit 0d93012 · 1 Parent(s): 1473469

Update app.py

Files changed (1):
  1. app.py +17 -14
app.py CHANGED
@@ -9,10 +9,13 @@ import streamlit as st
 REPO_NAME = 'schuler/experimental-JP47D20'
 
 # Load tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained(REPO_NAME, trust_remote_code=True)
-generator_conf = GenerationConfig.from_pretrained(REPO_NAME)
-model = AutoModelForCausalLM.from_pretrained(REPO_NAME, trust_remote_code=True)
-generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+try:
+    tokenizer = AutoTokenizer.from_pretrained(REPO_NAME, trust_remote_code=True)
+    generator_conf = GenerationConfig.from_pretrained(REPO_NAME)
+    model = AutoModelForCausalLM.from_pretrained(REPO_NAME, trust_remote_code=True)
+    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+except Exception as e:
+    st.error(f"Failed to load model: {str(e)}")
 
 # Configure the Streamlit app
 st.set_page_config(page_title="Experimental KPhi3 Model - Currently in Training", page_icon="🤗")
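
The try/except added here keeps a failed download from killing the script with a raw traceback, but note that after st.error the script keeps running with generator undefined, so the first call to it would raise NameError. A minimal sketch of a common Streamlit variant (not part of this commit; load_generator is a hypothetical helper) that caches the heavy load across reruns and halts cleanly on failure:

import streamlit as st
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          GenerationConfig, pipeline)

REPO_NAME = 'schuler/experimental-JP47D20'

@st.cache_resource  # load once per process instead of on every Streamlit rerun
def load_generator():
    # Hypothetical helper; the body mirrors the commit's loading code.
    tokenizer = AutoTokenizer.from_pretrained(REPO_NAME, trust_remote_code=True)
    generator_conf = GenerationConfig.from_pretrained(REPO_NAME)
    model = AutoModelForCausalLM.from_pretrained(REPO_NAME, trust_remote_code=True)
    return generator_conf, pipeline("text-generation", model=model, tokenizer=tokenizer)

try:
    generator_conf, generator = load_generator()
except Exception as e:
    st.error(f"Failed to load model: {e}")
    st.stop()  # prevent the rest of the page from running without a model
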
@@ -89,19 +92,17 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256, co
 
     Returns:
         tuple: A tuple containing the generated response and the updated chat history.
-    """
-    # Build the conversation prompt
-    prompt = ""
-    # f"{system_message}\nCurrent Conversation:\n"
-    for message in chat_history:
-        role = "<|assistant|>" if message['role'] == 'assistant' else "<|user|>"
-        prompt += f"\n{role}\n{message['content']}\n"
-
-
+    """
     if continue_last:
         # We want to continue the last assistant response
-        prompt = prompt
+        prompt = st.session_state.last_response
     else:
+        # Build the conversation prompt
+        prompt = ""
+        # f"{system_message}\nCurrent Conversation:\n"
+        for message in chat_history:
+            role = "<|assistant|>" if message['role'] == 'assistant' else "<|user|>"
+            prompt += f"\n{role}\n{message['content']}\n<|user|>\n"
         prompt += f"\n<|user|>\n{user_text}\n<|assistant|>\n"
 
     # Generate the response
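
Two things change in this hunk: the prompt builder moves into the else branch (the old continue_last path ended in the no-op prompt = prompt and now resumes from st.session_state.last_response), and the per-message format string gains a trailing <|user|> tag. A standalone trace of the new loop, with a hypothetical two-turn history, shows the exact prompt produced; note that the trailing tag follows assistant turns too, so user turns come out double-tagged:

# Trace of the new prompt construction; code is verbatim from the diff,
# only the toy history and user_text are made up for illustration.
chat_history = [
    {'role': 'user', 'content': 'Hi'},
    {'role': 'assistant', 'content': 'Hello!'},
]
user_text = 'How are you?'

prompt = ""
for message in chat_history:
    role = "<|assistant|>" if message['role'] == 'assistant' else "<|user|>"
    prompt += f"\n{role}\n{message['content']}\n<|user|>\n"
prompt += f"\n<|user|>\n{user_text}\n<|assistant|>\n"

print(prompt)
# The three += steps append, in order:
#   "\n<|user|>\nHi\n<|user|>\n"
#   "\n<|assistant|>\nHello!\n<|user|>\n"
#   "\n<|user|>\nHow are you?\n<|assistant|>\n"
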
@@ -116,6 +117,8 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256, co
 
     generated_text = response_output[0]['generated_text']
 
+    st.session_state.last_response = generated_text
+
     # Extract the assistant's response
     assistant_response = generated_text[len(prompt):].strip()
 
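
This hunk is the other half of the continue feature: the full generated text (prompt plus completion) is stored in st.session_state, which Streamlit preserves across reruns within a browser session, so a later get_response(..., continue_last=True) call can feed it back as the prompt. The diff shows no initializer for last_response, so presumably one exists elsewhere in app.py. A minimal sketch of the round trip under that assumption (the guard and continue_generation are hypothetical, not from the commit):

import streamlit as st

# Hypothetical guard: protects the first continue_last=True call.
if 'last_response' not in st.session_state:
    st.session_state.last_response = ""

def continue_generation(generator, max_new_tokens=256):
    # Hypothetical helper mirroring the diff's continue_last branch.
    prompt = st.session_state.last_response  # resume from the stored text
    output = generator(prompt, max_new_tokens=max_new_tokens)
    generated_text = output[0]['generated_text']
    st.session_state.last_response = generated_text  # persist for the next continue
    return generated_text[len(prompt):].strip()      # completion only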
 
 