Build error
Code cleanup
app.py
CHANGED
@@ -17,8 +17,6 @@ examples = [
     "I think things out well. When I speak, I speak with conviction. If I feel like it's something that best suits me and my person, I deal with it. I say it. I have no problem speaking out publicly about issues. But for personal things, and for things about personal selfishness, or wanting more money, I don't do that. Once I give my word, that's it. I don't go back to renegotiate. I don't renegotiate my contracts."
 ]
 
-# Descriptions for each models
-# descriptions = "Interview question remake is a model that..."
 
 # pass in Strings of model choice and input text for context
 @st.cache
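The hunk above ends at the `@st.cache` decorator on `genQuestion`, which memoizes the function so a rerun of the Streamlit script with the same model choice, context, and tag does not regenerate a response. A minimal sketch of the same caching idea applied to the expensive model-loading step follows; `load_reverse_model` is a hypothetical helper (not in app.py), and `allow_output_mutation=True` is the legacy-API flag typically needed when the cached value is a mutable model object (newer Streamlit versions replace `st.cache` with `st.cache_resource`).

import streamlit as st
from transformers import BartForConditionalGeneration, BartTokenizer

# Hypothetical helper, for illustration only: cache the model and tokenizer so
# they are loaded once per session rather than on every Streamlit rerun.
@st.cache(allow_output_mutation=True)
def load_reverse_model():
    model = BartForConditionalGeneration.from_pretrained("hyechanjun/reverse-interview-question")
    tok = BartTokenizer.from_pretrained("hyechanjun/reverse-interview-question")
    return model, tok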
@@ -51,11 +49,9 @@ def genQuestion(model_choice, context, tag):
             context += ' <NINETY>'
         elif (tag == '91+'):
             context += ' <HUNDRED>'
-        # descriptions = "Interview question tagged is a model that..."
     elif model_choice=="Reverse model":
         model = BartForConditionalGeneration.from_pretrained("hyechanjun/reverse-interview-question")
         tok = BartTokenizer.from_pretrained("hyechanjun/reverse-interview-question")
-        # descriptions = "Reverse interview question is a model that..."
 
     inputs = tok(context, return_tensors="pt")
     output = model.generate(inputs["input_ids"], num_beams=4, max_length=64, min_length=9, num_return_sequences=4, diversity_penalty=1.0, num_beam_groups=4)
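The `model.generate` call above uses diverse beam search (4 beams split into 4 groups with `diversity_penalty=1.0`) to return four distinct candidate questions for one context. The decoding of those four sequences into `final_output` is not part of this hunk, so the sketch below is an assumption about how the display string could be built rather than the app's actual code:

from transformers import BartForConditionalGeneration, BartTokenizer

# Standalone sketch (assumed, not copied from app.py): generate and decode the
# four diverse-beam-search candidates from the Reverse model branch.
model = BartForConditionalGeneration.from_pretrained("hyechanjun/reverse-interview-question")
tok = BartTokenizer.from_pretrained("hyechanjun/reverse-interview-question")

context = "Once I give my word, that's it. I don't renegotiate my contracts."
inputs = tok(context, return_tensors="pt")
output = model.generate(inputs["input_ids"], num_beams=4, max_length=64, min_length=9,
                        num_return_sequences=4, diversity_penalty=1.0, num_beam_groups=4)

# Strip special tokens and number the candidates for display.
candidates = tok.batch_decode(output, skip_special_tokens=True)
final_output = "\n".join(f"{i + 1}. {q}" for i, q in enumerate(candidates))
print(final_output)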
@@ -66,14 +62,14 @@ def genQuestion(model_choice, context, tag):
     return final_output
 
 
-# Wide page layout
+# Wide page layout
 st.set_page_config(layout="wide")
 
 # Title
 st.title("Interview AI Test Website")
 st.caption("With the advent of machine learning, it has become increasingly clear that AI is capable of completing tasks that were hitherto considered only possible by human minds. We are now pushing the boundaries of what AI can do with natural language processing (NLP), from summarizing pages of text to keeping up a conversation with a human. Our project aims to join those on the frontier of machine learning by creating an AI Interviewer. There are two main problems to address here: first, whether creating such an interviewer will be possible, and second, whether it will be any good. The models have been fed datasets derived from https://www.kaggle.com/datasets/shuyangli94/interview-npr-media-dialog-transcripts")
 
-# Adding a Session State to store stateful variables
+# Adding a Session State to store stateful variables
 if 'button_sent' not in st.session_state:
     st.session_state.button_sent = False
 
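The session-state guard at the end of this hunk is what keeps `button_sent` alive across Streamlit reruns: the script re-executes top to bottom on every widget interaction, and without the flag the generated output would vanish as soon as the user touched another control. A minimal, self-contained sketch of the pattern (widget labels here are illustrative, not the ones in app.py):

import streamlit as st

st.set_page_config(layout="wide")

# Initialise the flag exactly once; st.session_state survives script reruns.
if 'button_sent' not in st.session_state:
    st.session_state.button_sent = False

# Clicking the button sets the flag; later reruns see it as already True.
if st.button('Submit'):
    st.session_state.button_sent = True

if st.session_state.button_sent:
    st.write("Output rendered here persists across subsequent widget interactions.")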
@@ -121,15 +117,8 @@ if st.button('Submit') or st.session_state.button_sent:
     with st.spinner('Generating a response...'):
         output = genQuestion(option, input, context_length)
         print(output)
-        # st.write(output)
         st.session_state.button_sent = True
         st.text_area(label="Generated Responses:", value=output, height=200)
 
 
 
-# TODO:
-# - disable multiselect widget when responses are being generated AND when a question is not selected to be tagged
-# - connect tags with an individual question
-# - save session state so tags associated with their respective questions can also be saved
-# - write/store the saved state data to some database for future use?
-# - brainstorm good names for tags/labels OR allow users to enter their own tag names if possible
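The final hunk shows the submit flow: the button (or the persisted `button_sent` flag) triggers generation inside a spinner and the result is rendered in a text area, while the old TODO comments about tagging are dropped. Below is a hedged end-to-end sketch of that flow with `genQuestion` stubbed out, since only fragments of it appear in this diff; the widget labels and options are illustrative, and `user_input` stands in for the app's `input` variable, which shadows the Python builtin.

import streamlit as st

def genQuestion(model_choice, context, tag):
    # Stub standing in for the real model call shown in the earlier hunks.
    return f"[{model_choice}] generated question for: {context[:40]}..."

option = st.selectbox('Model', ['Interview question remake', 'Reverse model'])
user_input = st.text_area('Context')
context_length = st.selectbox('Tag', ['91+'])

if 'button_sent' not in st.session_state:
    st.session_state.button_sent = False

if st.button('Submit') or st.session_state.button_sent:
    with st.spinner('Generating a response...'):
        output = genQuestion(option, user_input, context_length)
        print(output)  # also logged server-side, as in the original
        st.session_state.button_sent = True
        st.text_area(label="Generated Responses:", value=output, height=200)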