import gradio as gr
import numpy as np
import librosa
from transformers import pipeline
from datetime import datetime
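
# 디지털 굿판 (Digital Gutpan): a Gradio app that guides a visitor through a
# four-stage healing ritual set around the Oncheoncheon (온천천) stream,
# combining ambient sound, written reflections, and voice analysis.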

# Ritual stages of the journey (Korean labels are shown in the UI).
STAGES = {
    "INTRO": "입장",        # entry
    "CLEANSING": "청신",    # cleansing
    "PRAYER": "기원",       # prayer
    "SHARING": "송신"       # sharing
}

# Hugging Face pipelines, loaded once at import time (weights are downloaded
# from the Hub on first run).
speech_recognizer = pipeline(
    "automatic-speech-recognition",
    model="kresnik/wav2vec2-large-xlsr-korean"
)
emotion_classifier = pipeline(
    "audio-classification",
    model="MIT/ast-finetuned-speech-commands-v2"
)
text_analyzer = pipeline(
    "sentiment-analysis",
    model="nlptown/bert-base-multilingual-uncased-sentiment"
)
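
# Note: MIT/ast-finetuned-speech-commands-v2 is an Audio Spectrogram Transformer
# fine-tuned for keyword spotting on Speech Commands v2, not on emotional
# speech; its labels serve only as a rough stand-in for vocal-emotion cues here.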


class DigitalGutApp:
    def __init__(self):
        self.current_stage = "INTRO"
        self.user_name = ""
        # Per-session data accumulated as the visitor moves through the stages.
        self.session_data = {
            "reflections": [],
            "voice_analysis": None,
            "generated_prompts": [],
            "current_location": "온천장역"  # Oncheonjang Station
        }

    def create_interface(self):
        with gr.Blocks(theme=gr.themes.Soft()) as app:
            state = gr.State(self.session_data)
            current_stage = gr.State(self.current_stage)

            # Always-visible header with the current stage description.
            with gr.Column(visible=True) as header:
                gr.Markdown("# 디지털 굿판")
                stage_indicator = gr.Markdown(self._get_stage_description())

            # One sub-column per stage; only the active stage is shown.
            with gr.Column() as main_content:
                # `visible` takes a boolean, not a callable; the intro screen
                # starts visible and the rest are toggled via gr.update().
                with gr.Column(visible=True) as intro_screen:
                    gr.Markdown("""
                    # 디지털 굿판에 오신 것을 환영합니다
                    온천천의 디지털 치유 공간으로 들어가보세요.
                    """)
                    name_input = gr.Textbox(label="이름을 알려주세요")
                    start_button = gr.Button("여정 시작하기")

                with gr.Column(visible=False) as cleansing_screen:
                    with gr.Row():
                        audio_player = gr.Audio(
                            value="assets/main_music.mp3",
                            type="filepath",
                            label="온천천의 소리"
                        )

                        with gr.Column():
                            reflection_input = gr.Textbox(
                                label="현재 순간의 감상을 적어주세요",
                                lines=3
                            )
                            # Renamed from `save_reflection` so the button no
                            # longer shadows the click handler of the same name.
                            save_reflection_btn = gr.Button("감상 저장")
                            reflections_display = gr.Dataframe(
                                headers=["시간", "감상", "감정"],
                                label="기록된 감상들"
                            )

                with gr.Column(visible=False) as prayer_screen:
                    with gr.Row():
                        voice_input = gr.Audio(
                            label="나누고 싶은 이야기를 들려주세요",
                            sources=["microphone"],
                            type="filepath"
                        )
                        analysis_output = gr.JSON(label="분석 결과")

                with gr.Column(visible=False) as sharing_screen:
                    final_prompt = gr.Textbox(label="생성된 프롬프트")
                    gallery = gr.Gallery(label="시각화 결과")

            # Floating quick-navigation menu.
            with gr.Column(visible=True) as floating_menu:
                gr.Button("🏠", scale=1)
                gr.Button("🎵", scale=1)
                gr.Button("🎤", scale=1)
                gr.Button("🖼️", scale=1)

            # ---- Event handlers ----

            def start_journey(name):
                self.user_name = name
                self.current_stage = "CLEANSING"
                # One gr.update per screen, in the same order as the
                # `outputs` list registered on start_button below.
                return self._update_visibility()

            def save_reflection(text, state):
                if not text.strip():
                    return state, gr.update()

                current_time = datetime.now().strftime("%H:%M:%S")
                sentiment = text_analyzer(text)[0]
                new_reflection = [current_time, text, sentiment["label"]]

                state["reflections"].append(new_reflection)
                return state, state["reflections"]
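
            # (The nlptown model labels text with star ratings such as
            # "4 stars"; that string is what appears in the "감정" column.)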

            def analyze_voice(audio, state):
                # Every code path must return both outputs
                # (analysis_output, state).
                if audio is None:
                    return {"error": "음성 입력이 없습니다."}, state

                result = self._comprehensive_voice_analysis(audio)
                state["voice_analysis"] = result
                return result, state

            # ---- Event wiring ----

            start_button.click(
                fn=start_journey,
                inputs=[name_input],
                outputs=[intro_screen, cleansing_screen, prayer_screen, sharing_screen]
            )

            save_reflection_btn.click(
                fn=save_reflection,
                inputs=[reflection_input, state],
                outputs=[state, reflections_display]
            )

            voice_input.change(
                fn=analyze_voice,
                inputs=[voice_input, state],
                outputs=[analysis_output, state]
            )

        return app

    def _comprehensive_voice_analysis(self, audio_path):
        """Perform a comprehensive analysis of the recorded voice."""
        try:
            y, sr = librosa.load(audio_path)

            # Low-level acoustic features.
            acoustic_features = {
                "energy": float(np.mean(librosa.feature.rms(y=y))),
                # Mean fundamental frequency via YIN; librosa.pitch_tuning
                # expects frequencies, not a raw waveform.
                "pitch_mean": float(np.nanmean(librosa.yin(
                    y,
                    fmin=librosa.note_to_hz("C2"),
                    fmax=librosa.note_to_hz("C7"),
                    sr=sr
                ))),
                # librosa >= 0.10 also exposes this as librosa.feature.tempo.
                "tempo": float(librosa.beat.tempo(y=y, sr=sr)[0]),
                "mfcc": librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13).mean(axis=1).tolist()
            }

            # The audio pipelines accept a file path and handle decoding and
            # resampling themselves.
            emotion_result = emotion_classifier(audio_path)
            text_result = speech_recognizer(audio_path)

            # Sentiment of the transcribed speech.
            text_sentiment = text_analyzer(text_result["text"])[0]

            return {
                "acoustic_analysis": acoustic_features,
                "emotion": emotion_result[0],
                "transcription": text_result["text"],
                "text_sentiment": text_sentiment
            }

        except Exception as e:
            return {"error": str(e)}

    def _get_stage_description(self):
        """Return the description of the current stage."""
        descriptions = {
            "INTRO": "디지털 굿판에 오신 것을 환영합니다",
            "CLEANSING": "청신 - 소리로 정화하기",
            "PRAYER": "기원 - 목소리로 전하기",
            "SHARING": "송신 - 함께 나누기"
        }
        return descriptions.get(self.current_stage, "")

    def _update_visibility(self):
        """Return per-screen visibility updates for the current stage."""
        # The click handler's outputs are gr.Column components, so each one
        # needs a gr.update(visible=...) rather than a name-keyed dict of
        # booleans.
        return (
            gr.update(visible=self.current_stage == "INTRO"),
            gr.update(visible=self.current_stage == "CLEANSING"),
            gr.update(visible=self.current_stage == "PRAYER"),
            gr.update(visible=self.current_stage == "SHARING"),
        )
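
    # (The tuple order above must match the `outputs` list registered on
    # start_button.click in create_interface.)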


if __name__ == "__main__":
    app = DigitalGutApp()
    interface = app.create_interface()
    interface.launch()
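
    # To hand out a temporary public URL for a shared session, Gradio also
    # supports: interface.launch(share=True)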