import gradio as gr
import numpy as np
import librosa
from transformers import pipeline
from datetime import datetime

text_analyzer = pipeline(
    "sentiment-analysis",
    model="nlptown/bert-base-multilingual-uncased-sentiment"
)


def create_interface():
    with gr.Blocks(theme=gr.themes.Soft()) as app:
        state = gr.State({
            "stage": "intro",
            "reflections": [],
            "user_name": ""
        })

        gr.Markdown("# 디지털 굿판")

        with gr.Tab("입장"):
            name_input = gr.Textbox(label="이름을 알려주세요")
            start_button = gr.Button("여정 시작하기")

        with gr.Tab("청신"):
            audio_player = gr.Audio(
                value="assets/main_music.mp3",
                type="filepath",
                label="온천천의 소리"
            )

            reflection_text = gr.Textbox(
                label="현재 순간의 감상을 적어주세요",
                lines=3
            )
            save_button = gr.Button("감상 저장")
            reflections_display = gr.Dataframe(
                headers=["시간", "감상", "감정"],
                label="기록된 감상들"
            )

        with gr.Tab("기원"):
            voice_input = gr.Audio(
                label="나누고 싶은 이야기를 들려주세요",
                sources=["microphone"],
                type="filepath"
            )
            analysis_output = gr.JSON(label="분석 결과")

        with gr.Tab("송신"):
            final_prompt = gr.Textbox(label="생성된 프롬프트")

        def save_reflection(text, state):
            # Ignore empty input and leave the current table unchanged.
            if not text.strip():
                return state, state["reflections"]

            try:
                current_time = datetime.now().strftime("%H:%M:%S")
                sentiment = text_analyzer(text)[0]

                new_reflection = [current_time, text, sentiment["label"]]
                state["reflections"].append(new_reflection)
                return state, state["reflections"]
            except Exception as e:
                # Report the failure without sending a raw string to the Dataframe output.
                print(f"오류 발생: {e}")
                return state, state["reflections"]
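
        # Minimal sketch of a handler for the "입장" (entry) tab: name_input and
        # start_button exist above but are never wired up. The function name
        # start_journey and the choice of "청신" as the next stage value are
        # assumptions, not part of the original code.
        def start_journey(name, state):
            state["user_name"] = name.strip()
            state["stage"] = "청신"  # assumed next stage after the intro
            return state

        start_button.click(
            fn=start_journey,
            inputs=[name_input, state],
            outputs=[state]
        )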

        save_button.click(
            fn=save_reflection,
            inputs=[reflection_text, state],
            outputs=[state, reflections_display]
        )
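
        # Minimal sketch of an analysis step for the "기원" tab, assuming the
        # otherwise unused librosa/numpy imports were meant for the recorded
        # voice. It computes a few basic acoustic features and shows them in
        # the gr.JSON component; analyze_voice and its feature set are
        # assumptions, not the original author's method.
        def analyze_voice(audio_path):
            if not audio_path:
                return {"status": "no audio recorded"}
            y, sr = librosa.load(audio_path, sr=None)
            return {
                "duration_sec": round(float(librosa.get_duration(y=y, sr=sr)), 2),
                "rms_mean": round(float(np.mean(librosa.feature.rms(y=y))), 4),
                "zero_crossing_rate": round(float(np.mean(librosa.feature.zero_crossing_rate(y))), 4),
            }

        voice_input.change(
            fn=analyze_voice,
            inputs=voice_input,
            outputs=analysis_output
        )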

    return app


if __name__ == "__main__":
    interface = create_interface()
    interface.launch()