simonraj committed on
Commit
9ed052b
·
verified ·
1 Parent(s): cb28693

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -4
app.py CHANGED
@@ -47,10 +47,14 @@ def transcribe(audio_path):
47
  text = model.stt_file(audio_file)[0]
48
  return text
49
 
 
50
  # Inference function using Hugging Face InferenceClient
51
  @spaces.GPU(duration=120)
52
  def model(text):
53
- system_instructions = "[SYSTEM] You are CrucialCoach, an AI-powered conversational coach. Guide the user through challenging workplace situations using the principles from 'Crucial Conversations'. Ask one question at a time and provide step-by-step guidance.\n\n[USER]"
 
 
 
54
  generate_kwargs = dict(
55
  temperature=0.7,
56
  max_new_tokens=512,
@@ -76,7 +80,7 @@ async def generate_audio_feedback(feedback_text):
76
  await communicate.save(tmp_path)
77
  return tmp_path
78
 
79
- # Generating feedback for the Oral Coach
80
  async def generate_feedback(user_id, question_choice, strategy_choice, message, feedback_level):
81
  current_question_index = thinkingframes.questions.index(question_choice)
82
  strategy, explanation = thinkingframes.strategy_options[strategy_choice]
@@ -115,7 +119,9 @@ async def generate_feedback(user_id, question_choice, strategy_choice, message,
115
  "content": message
116
  }]
117
 
118
- response = model(conversation)
 
 
119
  chat_history = [] # Initialize chat history outside the loop
120
  full_feedback = "" # Accumulate the entire feedback message
121
  try:
@@ -168,6 +174,15 @@ async def predict(question_choice, strategy_choice, feedback_level, audio):
168
  chat_history.append(("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "Transcription complete. Generating feedback. Please continue listening to your oral response while waiting ..."))
169
  yield chat_history, current_audio_output
170
 
 
 
 
 
 
 
 
 
 
171
  accumulated_feedback = "" # Variable to store the accumulated feedback
172
 
173
  async for feedback_chunk in generate_feedback(int(user_state.value), question_choice, strategy_choice, student_response, feedback_level):
@@ -187,7 +202,6 @@ async def predict(question_choice, strategy_choice, feedback_level, audio):
187
  logging.error(f"An error occurred: {str(e)}", exc_info=True)
188
  yield [("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "An error occurred. Please try again or seek assistance.")], current_audio_output
189
 
190
-
191
  with gr.Blocks(title="Oral Coach powered by ZeroGPU⚡ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡ and Meta AI 🦙 (LLama3)", theme=theme, css="footer {visibility: hidden}textbox{resize:none}") as demo:
192
  with gr.Tab("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡"):
193
  gr.Markdown("## Student Information")
 
47
  text = model.stt_file(audio_file)[0]
48
  return text
49
 
50
+ # Inference function using Hugging Face InferenceClient
51
  # Inference function using Hugging Face InferenceClient
52
  @spaces.GPU(duration=120)
53
  def model(text):
54
+ system_instructions = (
55
+ "[SYSTEM] You are OralCoach, an AI-powered conversational coach. Guide the student through their oral responses "
56
+ "using the principles from their English curriculum. Ask one question at a time and provide step-by-step guidance.\n\n[USER]"
57
+ )
58
  generate_kwargs = dict(
59
  temperature=0.7,
60
  max_new_tokens=512,
 
80
  await communicate.save(tmp_path)
81
  return tmp_path
82
 
83
+ #generate feedback
84
  async def generate_feedback(user_id, question_choice, strategy_choice, message, feedback_level):
85
  current_question_index = thinkingframes.questions.index(question_choice)
86
  strategy, explanation = thinkingframes.strategy_options[strategy_choice]
 
119
  "content": message
120
  }]
121
 
122
+ user_message = conversation[1]["content"] # Extract the user message from the conversation
123
+ response = model(user_message)
124
+
125
  chat_history = [] # Initialize chat history outside the loop
126
  full_feedback = "" # Accumulate the entire feedback message
127
  try:
 
174
  chat_history.append(("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "Transcription complete. Generating feedback. Please continue listening to your oral response while waiting ..."))
175
  yield chat_history, current_audio_output
176
 
177
+ moderation_response = client.moderations.create(input=student_response)
178
+ flagged = any(result.flagged for result in moderation_response.results)
179
+ if flagged:
180
+ moderated_message = "The message has been flagged. Please see your teacher to clarify."
181
+ questionNo = thinkingframes.questions.index(question_choice) + 1
182
+ add_submission(int(user_state.value), moderated_message, "", int(0), "", questionNo)
183
+ yield chat_history, current_audio_output
184
+ return
185
+
186
  accumulated_feedback = "" # Variable to store the accumulated feedback
187
 
188
  async for feedback_chunk in generate_feedback(int(user_state.value), question_choice, strategy_choice, student_response, feedback_level):
 
202
  logging.error(f"An error occurred: {str(e)}", exc_info=True)
203
  yield [("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "An error occurred. Please try again or seek assistance.")], current_audio_output
204
 
 
205
  with gr.Blocks(title="Oral Coach powered by ZeroGPU⚡ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡ and Meta AI 🦙 (LLama3)", theme=theme, css="footer {visibility: hidden}textbox{resize:none}") as demo:
206
  with gr.Tab("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡"):
207
  gr.Markdown("## Student Information")