remove app simple. update readme

Files changed:
- README-HF.md: +4 -4
- README.md: +2 -2
- app.py: +6 -1
- app_simple.py: +0 -148
README-HF.md
CHANGED

```diff
@@ -32,10 +32,10 @@ The current social graph represents a British person with MND who:
 
 1. Select a person from the dropdown menu
 2. View their context information
-3. Optionally enter current conversation context
-4.
-5.
-6. Click "
+3. Optionally enter current conversation context or record audio
+4. If you record audio, click "Transcribe" to convert it to text
+5. Choose a suggestion type
+6. Click "Generate Suggestions" to get contextually relevant phrases
 
 ## Customizing the Social Graph
 
```
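The new steps 3-4 assume the main app now exposes an audio recorder and a "Transcribe" button next to the conversation text box. Below is a minimal sketch of those Gradio components; the names `audio_input`, `transcribe_btn`, and `user_input` match the app.py hunk further down, but the exact arguments are assumptions written against the Gradio 4.x API, not code from this commit.

```python
import gradio as gr

with gr.Blocks() as demo:
    # Assumed layout: a microphone recorder plus a button that fills the
    # conversation textbox with the transcription (see the app.py hunk below).
    audio_input = gr.Audio(sources=["microphone"], type="filepath",
                           label="Record audio (optional)")
    transcribe_btn = gr.Button("Transcribe")
    user_input = gr.Textbox(
        label="Your current conversation (optional)",
        placeholder="Type or paste current conversation context here...",
        lines=3,
    )
```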
README.md
CHANGED

```diff
@@ -97,11 +97,11 @@ To deploy this application to Hugging Face Spaces:
 
 ## Future Improvements
 
-- Add speech recognition for input
-- Implement text-to-speech for output
 - Add a visual representation of the social graph
 - Support for multiple users with different social graphs
 - Add emotion/sentiment detection for more contextually appropriate suggestions
+- Implement text-to-speech for output
+- Improve speech recognition with a larger Whisper model
 
 ## License
 
```
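On the last new item: if transcription goes through the transformers ASR pipeline, moving to a larger Whisper model is essentially a checkpoint swap. A hedged sketch follows; the checkpoint IDs are standard `openai/whisper-*` models on the Hugging Face Hub, and which size the app actually loads is not shown in this commit.

```python
from transformers import pipeline

# Larger checkpoints trade speed and memory for accuracy. The current default
# size is an assumption; it does not appear in this diff.
asr = pipeline("automatic-speech-recognition", model="openai/whisper-small")
# Later, e.g.: model="openai/whisper-medium" or "openai/whisper-large-v3"
```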
app.py
CHANGED

```diff
@@ -169,7 +169,12 @@ with gr.Blocks(title="AAC Social Graph Assistant") as demo:
         outputs=[suggestions_output],
     )
 
-
+    # Transcribe audio to text
+    transcribe_btn.click(
+        transcribe_audio,
+        inputs=[audio_input],
+        outputs=[user_input],
+    )
 
 # Launch the app
 if __name__ == "__main__":
```
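The new handler wires a `transcribe_audio` function, defined elsewhere in app.py and not shown in this hunk, to the new button: it takes the recorded clip from `audio_input` and writes the transcription into `user_input`. A plausible sketch of that function, assuming a small Whisper checkpoint via the transformers ASR pipeline; the real implementation may differ.

```python
from transformers import pipeline

# Assumption: transcription uses a small Whisper checkpoint through the
# transformers ASR pipeline; the actual model and code are not in this hunk.
_asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")


def transcribe_audio(audio_path):
    """Return the transcription of the recorded clip, or "" if nothing was recorded."""
    if not audio_path:  # gr.Audio(type="filepath") passes None when no clip was recorded
        return ""
    return _asr(audio_path)["text"].strip()
```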
app_simple.py
DELETED

@@ -1,148 +0,0 @@

The entire file was removed:

```python
import gradio as gr
from utils import SocialGraphManager, SuggestionGenerator

# Initialize the social graph manager and suggestion generator
social_graph = SocialGraphManager("social_graph.json")
suggestion_generator = SuggestionGenerator()


def format_person_display(person):
    """Format person information for display in the dropdown."""
    return f"{person['name']} ({person['role']})"


def get_people_choices():
    """Get formatted choices for the people dropdown."""
    people = social_graph.get_people_list()
    return {format_person_display(person): person["id"] for person in people}


def get_suggestion_categories():
    """Get suggestion categories from the social graph."""
    if "common_utterances" in social_graph.graph:
        return list(social_graph.graph["common_utterances"].keys())
    return []


def on_person_change(person_id):
    """Handle person selection change."""
    if not person_id:
        return "", []

    person_context = social_graph.get_person_context(person_id)
    context_info = f"**{person_context.get('name', '')}** - {person_context.get('role', '')}\n\n"
    context_info += f"**Topics:** {', '.join(person_context.get('topics', []))}\n\n"
    context_info += f"**Frequency:** {person_context.get('frequency', '')}\n\n"
    context_info += f"**Context:** {person_context.get('context', '')}"

    # Get common phrases for this person
    phrases = person_context.get("common_phrases", [])
    phrases_text = "\n\n".join(phrases)

    return context_info, phrases_text


def generate_suggestions(person_id, user_input, suggestion_type):
    """Generate suggestions based on the selected person and user input."""
    if not person_id:
        return "Please select a person first."

    person_context = social_graph.get_person_context(person_id)

    # If suggestion type is "model", use the language model
    if suggestion_type == "model":
        suggestion = suggestion_generator.generate_suggestion(person_context, user_input)
        return suggestion

    # If suggestion type is "common_phrases", use the person's common phrases
    elif suggestion_type == "common_phrases":
        phrases = social_graph.get_relevant_phrases(person_id, user_input)
        return "\n\n".join(phrases)

    # If suggestion type is a category from common_utterances
    elif suggestion_type in get_suggestion_categories():
        utterances = social_graph.get_common_utterances(suggestion_type)
        return "\n\n".join(utterances)

    # Default fallback
    return "No suggestions available."


def speak_text(text):
    """Function to 'speak' the selected text (placeholder for TTS integration)."""
    return f"Speaking: {text}"


# Create the Gradio interface
with gr.Blocks(title="AAC Social Graph Assistant") as demo:
    gr.Markdown("# AAC Social Graph Assistant")
    gr.Markdown("Select who you're talking to, and get contextually relevant suggestions.")

    with gr.Row():
        with gr.Column(scale=1):
            # Person selection
            person_dropdown = gr.Dropdown(
                choices=get_people_choices(),
                label="Who are you talking to?"
            )

            # Context display
            context_display = gr.Markdown(label="Context Information")

            # User input
            user_input = gr.Textbox(
                label="Your current conversation (optional)",
                placeholder="Type or paste current conversation context here...",
                lines=3
            )

            # Suggestion type selection
            suggestion_type = gr.Radio(
                choices=["model", "common_phrases"] + get_suggestion_categories(),
                value="model",
                label="Suggestion Type"
            )

            # Generate button
            generate_btn = gr.Button("Generate Suggestions", variant="primary")

        with gr.Column(scale=1):
            # Common phrases
            common_phrases = gr.Textbox(
                label="Common Phrases",
                placeholder="Common phrases will appear here...",
                lines=5
            )

            # Suggestions output
            suggestions_output = gr.Textbox(
                label="Suggested Phrases",
                placeholder="Suggestions will appear here...",
                lines=8
            )

            # Speak button
            speak_btn = gr.Button("Speak Selected Text", variant="secondary")

            # Speech output
            speech_output = gr.Textbox(
                label="Speech Output",
                placeholder="Speech output will appear here...",
                lines=2
            )

    # Set up event handlers
    person_dropdown.change(
        on_person_change,
        inputs=[person_dropdown],
        outputs=[context_display, common_phrases]
    )

    generate_btn.click(
        generate_suggestions,
        inputs=[person_dropdown, user_input, suggestion_type],
        outputs=[suggestions_output]
    )

    speak_btn.click(
        speak_text,
        inputs=[suggestions_output],
        outputs=[speech_output]
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()
```