haepada committed on
Commit
0f6de85
·
verified ·
1 Parent(s): 4caff9e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -65
app.py CHANGED
@@ -325,74 +325,74 @@ def create_interface():
325
  """
326
  return welcome_text, gr.update(selected="기준 설정")
327
 
328
- def play_music():
329
  audio_path = os.path.abspath(os.path.join("assets", "main_music.mp3"))
330
  return gr.update(value=audio_path, autoplay=True)
331
 
332
 
333
- def save_reflection(text, state):
334
- if not text.strip():
335
- return state, state["reflections"]
336
-
337
- try:
338
- current_time = datetime.now().strftime("%H:%M:%S")
339
- sentiment = text_analyzer(text)[0]
340
- new_reflection = [current_time, text, f"{sentiment['label']} ({sentiment['score']:.2f})"]
341
-
342
- state = state.copy()
343
- if "reflections" not in state:
344
- state["reflections"] = []
345
-
346
- state["reflections"].append(new_reflection)
347
- return state, state["reflections"]
348
- except Exception as e:
349
- print(f"Error in save_reflection: {str(e)}")
350
- return state, []
351
-
352
- def analyze_voice(audio_path, state):
353
- if audio_path is None:
354
- return state, "음성을 먼저 녹음해주세요.", "", "", ""
355
-
356
- try:
357
- result = analyze_voice_with_retry(audio_path, state)
358
-
359
- voice_result = (
360
- f"음성 감정: {result['emotions']['primary']} "
361
- f"(강도: {result['emotions']['intensity']:.1f}%, "
362
- f"신뢰도: {result['emotions']['confidence']:.2f})\n"
363
- f"특징: {', '.join(result['emotions']['characteristics'])}"
364
- )
365
-
366
- text_result = f"텍스트 감정: {result['sentiment']['label']} "
367
- f"(강도: {result['sentiment']['score']}/5)"
368
-
369
- prompt = generate_detailed_prompt(
370
- result['text'],
371
- result['emotions'],
372
- result['sentiment']
373
- )
374
-
375
- return state, result['text'], voice_result, text_result, prompt
376
- except Exception as e:
377
- return state, f"오류 발생: {str(e)}", "", "", ""
378
-
379
- def update_sending_tab(state):
380
- """청신의 감상과 기원의 음성을 종합하여 송신 탭 업데이트"""
381
- combined = "청신과 기원의 여정을 담은 이미지:\n\n"
382
- combined += update_final_prompt(state)
383
- return combined
384
-
385
- # 이벤트 연결
386
- start_btn.click(fn=start_journey, inputs=[name_input], outputs=[user_display, tabs])
387
- play_music_btn.click(fn=play_music, outputs=[audio])
388
- save_btn.click(fn=save_reflection, inputs=[reflection_input, state], outputs=[state, reflections_display])
389
- analyze_btn.click(fn=analyze_voice, inputs=[voice_input, state], outputs=[state, transcribed_text, voice_emotion, text_emotion, final_prompt])
390
- continue_to_prayer_btn.click(fn=lambda: gr.update(selected="기원"), outputs=[tabs])
391
- tabs.change(fn=update_sending_tab, inputs=[state], outputs=[combined_prompt])
392
- generate_btn.click(fn=generate_image_from_prompt, inputs=[combined_prompt], outputs=[result_image])
393
-
394
- return app
395
 
396
  if __name__ == "__main__":
397
- demo = create_interface()
398
- demo.launch(debug=True)
 
325
  """
326
  return welcome_text, gr.update(selected="기준 설정")
327
 
328
def play_music():
    """Resolve the bundled main theme and return a Gradio update that autoplays it."""
    track = os.path.abspath(os.path.join("assets", "main_music.mp3"))
    return gr.update(value=track, autoplay=True)
331
 
332
 
333
def save_reflection(text, state):
    """Append a timestamped, sentiment-tagged reflection to the session state.

    Args:
        text: The user's reflection text; blank/whitespace input is ignored.
        state: Session state dict; entries accumulate under "reflections".

    Returns:
        (updated_state, reflections_list) feeding the Gradio state and table
        outputs. On analysis failure, (original_state, []).
    """
    if not text.strip():
        # BUG FIX: use .get() — before the first save the "reflections" key
        # does not exist, so state["reflections"] raised KeyError here.
        return state, state.get("reflections", [])

    try:
        current_time = datetime.now().strftime("%H:%M:%S")
        # text_analyzer is a sentiment pipeline defined elsewhere in this file;
        # presumably a HF text-classification pipeline — confirm against its setup.
        sentiment = text_analyzer(text)[0]
        new_reflection = [
            current_time,
            text,
            f"{sentiment['label']} ({sentiment['score']:.2f})",
        ]

        # Shallow copy so Gradio sees a fresh state object. NOTE(review): the
        # inner "reflections" list is still shared with the old dict, matching
        # the original behavior.
        state = state.copy()
        if "reflections" not in state:
            state["reflections"] = []

        state["reflections"].append(new_reflection)
        return state, state["reflections"]
    except Exception as e:
        # Best-effort: log and keep the UI responsive instead of crashing.
        print(f"Error in save_reflection: {str(e)}")
        return state, []
351
+
352
def analyze_voice(audio_path, state):
    """Transcribe and emotionally analyze a recorded voice clip.

    Args:
        audio_path: Path to the recorded audio file, or None when nothing
            has been recorded yet.
        state: Session state dict, passed through unchanged.

    Returns:
        (state, transcribed_text, voice_emotion_summary, text_emotion_summary,
        generated_prompt). On error the last three slots are empty strings and
        the transcription slot carries the (Korean) message.
    """
    if audio_path is None:
        # Nothing recorded yet — ask the user to record first.
        return state, "음성을 먼저 녹음해주세요.", "", "", ""

    try:
        # analyze_voice_with_retry is defined elsewhere in this file.
        result = analyze_voice_with_retry(audio_path, state)

        voice_result = (
            f"음성 감정: {result['emotions']['primary']} "
            f"(강도: {result['emotions']['intensity']:.1f}%, "
            f"신뢰도: {result['emotions']['confidence']:.2f})\n"
            f"특징: {', '.join(result['emotions']['characteristics'])}"
        )

        # BUG FIX: these two f-strings were separate statements, so the second
        # ("(강도: .../5)") was evaluated and silently discarded. Parenthesize
        # so adjacent literals concatenate into one string, as in voice_result.
        text_result = (
            f"텍스트 감정: {result['sentiment']['label']} "
            f"(강도: {result['sentiment']['score']}/5)"
        )

        prompt = generate_detailed_prompt(
            result['text'],
            result['emotions'],
            result['sentiment']
        )

        return state, result['text'], voice_result, text_result, prompt
    except Exception as e:
        # Surface the error in the transcription field instead of crashing the UI.
        return state, f"오류 발생: {str(e)}", "", "", ""
378
+
379
def update_sending_tab(state):
    """Build the combined prompt for the sending tab.

    Prepends a fixed Korean header ("an image of the listening and prayer
    journey") to the final prompt produced by update_final_prompt(state).
    """
    header = "청신과 기원의 여정을 담은 이미지:\n\n"
    return header + update_final_prompt(state)
384
+
385
+ # 이벤트 연결
386
+ start_btn.click(fn=start_journey, inputs=[name_input], outputs=[user_display, tabs])
387
+ play_music_btn.click(fn=play_music, outputs=[audio])
388
+ save_btn.click(fn=save_reflection, inputs=[reflection_input, state], outputs=[state, reflections_display])
389
+ analyze_btn.click(fn=analyze_voice, inputs=[voice_input, state], outputs=[state, transcribed_text, voice_emotion, text_emotion, final_prompt])
390
+ continue_to_prayer_btn.click(fn=lambda: gr.update(selected="기원"), outputs=[tabs])
391
+ tabs.change(fn=update_sending_tab, inputs=[state], outputs=[combined_prompt])
392
+ generate_btn.click(fn=generate_image_from_prompt, inputs=[combined_prompt], outputs=[result_image])
393
+
394
+ return app
395
 
396
# Script entry point: build the Gradio interface and serve it with debug output.
if __name__ == "__main__":
    interface = create_interface()
    interface.launch(debug=True)