Spaces: Running
nguyenbh committed · Commit e8daeee
1 Parent(s): 724e3a3
update examples

app.py CHANGED
@@ -17,8 +17,10 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(
 logger = logging.getLogger(__name__)

 # Azure ML endpoint configuration
-url = os.getenv("AZURE_ENDPOINT")
-api_key = os.getenv("AZURE_API_KEY")
+# url = os.getenv("AZURE_ENDPOINT")
+# api_key = os.getenv("AZURE_API_KEY")
+url = "https://nguyenbach-phi4mm.westus3.inference.ml.azure.com/score"
+api_key = "Cvrjxi2JNjDYYGQwclnfvaBt38Gbc001LaF6KS0z7PeW00XWxjWjJQQJ99BBAAAAAAAAAAAAINFRAZMLuz96"


 # Initialize MIME types
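This hunk swaps the environment-variable lookups for a hardcoded scoring URL and key. The changed lines never show how url and api_key are consumed; a typical call to an Azure ML online endpoint from an app like this looks roughly like the sketch below, where the call_endpoint name and the {"messages": ...} payload shape are assumptions rather than code taken from app.py.

# Sketch only: how app.py presumably posts to the Azure ML scoring endpoint.
# The helper name and request/response schema are assumptions, not from the repo.
import requests

def call_endpoint(messages, timeout=120):
    headers = {
        "Content-Type": "application/json",
        # Azure ML online endpoints expect the key as a bearer token.
        "Authorization": f"Bearer {api_key}",
    }
    payload = {"messages": messages}  # assumed payload shape
    resp = requests.post(url, json=payload, headers=headers, timeout=timeout)
    resp.raise_for_status()
    return resp.json()

Note that hardcoding the key also puts it in the Space's git history; reading it with os.getenv and setting it as a Space secret would keep the same behavior without committing the credential.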
@@ -296,6 +298,39 @@ def process_message(history, message, conversation_state):

     return history, gr.MultimodalTextbox(value=None, interactive=False), conversation_state

+def process_text_example(example_text, history, conversation_state):
+    """Process a text example directly."""
+    try:
+        # Initialize history and conversation_state if they're None
+        if history is None:
+            history = []
+
+        if conversation_state is None:
+            conversation_state = []
+
+        # Add text message to history for display
+        history.append({"role": "user", "content": example_text})
+
+        # Add to conversation state
+        content_items = [
+            {"type": "text", "text": example_text}
+        ]
+
+        conversation_state.append({
+            "role": "user",
+            "content": content_items
+        })
+
+        # Generate bot response
+        return bot_response(history, conversation_state)
+    except Exception as e:
+        logger.error(f"Error processing text example: {e}", exc_info=True)
+        if history is None:
+            history = []
+        history.append({"role": "user", "content": example_text})
+        history.append({"role": "assistant", "content": f"Error: {str(e)}"})
+        return history, conversation_state
+
 def process_audio_example_direct(example_text, example_audio_url, history, conversation_state):
     """Process an audio example directly from a URL."""
     try:
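The new helper mirrors the existing process_audio_example_direct, minus the media handling. Assuming bot_response(history, conversation_state) hands back the updated (history, conversation_state) pair, as the button wiring later in this commit implies, a direct call would look like the sketch below.

# Usage sketch only; assumes bot_response returns the updated
# (history, conversation_state) pair, as the outputs wiring below implies.
history, conversation_state = process_text_example(
    "Explain the Transformer model to a medieval knight",
    history=None,             # helper initializes None to []
    conversation_state=None,
)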
@@ -567,22 +602,18 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:

         with gr.Column(scale=1):
             with gr.Tab("Audio & Text"):
-                gr.Markdown("### Audio Examples")
-
                 # Example 1
-                gr.Markdown("Example 1: **Transcribe this audio clip**")
                 gr.Audio("https://diamondfan.github.io/audio_files/english.weekend.plan.wav",
                          label="Preview", elem_id="small-audio")

-                example1_btn = gr.Button("
+                example1_btn = gr.Button("Transcribe this audio clip")

                 gr.Markdown("-----")

                 # Example 2
-                gr.Markdown("Example 2: **Translate audio transcription to English**")
                 gr.Audio("https://diamondfan.github.io/audio_files/japanese.seattle.trip.report.wav",
                          label="Preview", elem_id="small-audio")
-                example2_btn = gr.Button("
+                example2_btn = gr.Button("Translate audio transcription to English")

                 # Define handlers for audio examples
                 def run_audio_example1():
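With the Markdown captions removed, the prompts now double as the button labels. The .click() wiring for these buttons sits outside the changed lines, so the sketch below is a guess at its shape rather than a quote from app.py.

# Hypothetical wiring; the real .click() calls are not part of this diff.
example1_btn.click(
    fn=run_audio_example1,
    inputs=[],
    outputs=[chatbot, conversation_state],
)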
@@ -614,14 +645,12 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:

             with gr.Tab("Image & Text"):
                 # Example 1
-                gr.Markdown("Example 1: **Write a limerick about this image**")
                 gr.Image("https://upload.wikimedia.org/wikipedia/commons/thumb/3/31/Hanoi_Temple_of_Literature.jpg/640px-Hanoi_Temple_of_Literature.jpg", label="Preview")
-                img_example1_btn = gr.Button("
+                img_example1_btn = gr.Button("Write a limerick about this image")

                 # Example 2
-                gr.Markdown("Example 2: **convert the chart to a markdown table**")
                 gr.Image("https://pub-c2c1d9230f0b4abb9b0d2d95e06fd4ef.r2.dev/sites/566/2024/09/Screenshot-2024-09-16-115417.png", label="Preview")
-                img_example2_btn = gr.Button("
+                img_example2_btn = gr.Button("Convert the chart to a markdown table")

                 # Define handlers for image examples
                 def run_image_example1():
@@ -633,7 +662,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:

                 def run_image_example2():
                     return process_image_example_direct(
-                        "
+                        "Convert the chart to a markdown table",
                         "https://pub-c2c1d9230f0b4abb9b0d2d95e06fd4ef.r2.dev/sites/566/2024/09/Screenshot-2024-09-16-115417.png",
                         [], []
                     )
@@ -652,15 +681,25 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
                 )

             with gr.Tab("Text Only"):
-
-
-
-
-
-
-
-
-                )
+                # Create a list of example texts
+                text_example_list = [
+                    "I'd like to buy a new car. Start by asking me about my budget and which features I care most about, then provide a recommendation.",
+                    "Coffee shops have been slimming down their menus lately. Is less choice making our coffee runs better or do we miss the variety?",
+                    "Explain the Transformer model to a medieval knight"
+                ]
+
+                # Create buttons for each example
+                for i, example_text in enumerate(text_example_list):
+                    with gr.Row():
+                        # gr.Markdown(f"Example {i+1}: **{example_text}**")
+                        text_example_btn = gr.Button(f"{example_text}")
+
+                    # Connect button to handler with the specific example text
+                    text_example_btn.click(
+                        fn=lambda text=example_text: process_text_example(text, [], []),
+                        inputs=[],
+                        outputs=[chatbot, conversation_state]
+                    )

                 gr.Markdown("### Instructions")
                 gr.Markdown("""
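One detail in the new "Text Only" loop is worth noting: fn=lambda text=example_text: ... uses a default argument to freeze each button's prompt. A bare closure would late-bind the loop variable, so every button would submit the last example. A minimal, self-contained illustration (not from app.py):

# Late-binding pitfall that the default-argument trick in the diff avoids.
examples = ["a", "b", "c"]

late_bound = [lambda: text for text in examples]        # all share one 'text'
frozen = [lambda text=text: text for text in examples]  # default captures each value

print([f() for f in late_bound])  # ['c', 'c', 'c']
print([f() for f in frozen])      # ['a', 'b', 'c']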