venkat-natchi committed on
Commit
05e456e
·
verified ·
1 Parent(s): f315cdb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -28
app.py CHANGED
@@ -123,26 +123,28 @@ def generate_response(image, audio, text, count):
123
  return prediction[0].strip('<|endoftext|>').rstrip("\n")
124
 
125
 
 
126
  with gr.Blocks() as demo:
127
  gr.Markdown("# **AnyModeAssistant**")
128
  gr.Markdown("Use any mode text/image/audio to interact with AI assistant")
129
- with gr.Column():
130
- with gr.Row("Text"):
131
- text_input = gr.Textbox(placeholder="Enter your question here",
132
- label="Input")
133
- with gr.Row():
134
- image_input = gr.Image(type="filepath")
135
-
136
- with gr.Row("Audio mode"):
137
- audio_input = gr.Audio(type="filepath")
138
-
139
- with gr.Row("Image"):
140
- response_count = gr.Textbox(
141
- placeholder="Number of tokens to respond",
142
- defualt=20,
143
- label="Count")
144
- with gr.Column():
145
- response = gr.Textbox(label="AI Response")
 
146
  with gr.Row():
147
  submit_button = gr.Button("Submit")
148
  submit_button.click(generate_response,
@@ -150,14 +152,7 @@ with gr.Blocks() as demo:
150
  image_input, audio_input],
151
  outputs=response)
152
 
153
- # gr.Examples(
154
- # examples=[
155
- # ["What is a large language model?", "50"]
156
- # ],
157
- # # , image_input, image_text_input, audio_input],
158
- # inputs=[text_input, text_input_count],
159
- # outputs=[text_output], # , image_text_output, audio_text_output],
160
- # fn=example_inference,
161
- # )
162
-
163
- demo.launch()
 
123
  return prediction[0].strip('<|endoftext|>').rstrip("\n")
124
 
125
 
126
# NOTE(review): the committed line "%%blocks" is an IPython/Gradio notebook
# cell magic, not Python — in a plain app.py it is a SyntaxError and the app
# cannot start. Removed here; it must not be committed to the script.

# Build the Gradio UI: three input modes (text / image / audio) plus a
# token-count box on the left, the model's reply on the right.
with gr.Blocks() as demo:
    gr.Markdown("# **AnyModeAssistant**")
    gr.Markdown("Use any mode text/image/audio to interact with AI assistant")
    with gr.Row():
        # Left column (wider): all user inputs.
        with gr.Column(scale=4):
            # NOTE(review): the original passed positional labels to gr.Row
            # ("Text", "Audio mode", "Image"); gr.Row's parameters are
            # keyword-only, so those calls raise TypeError — confirm against
            # the pinned gradio version. Dropped here; use each component's
            # own `label=` instead.
            with gr.Row():
                text_input = gr.Textbox(placeholder="Enter your question here",
                                        label="Input")
            with gr.Row():
                image_input = gr.Image(type="filepath")

            with gr.Row():
                audio_input = gr.Audio(type="filepath")

            with gr.Row():
                response_count = gr.Textbox(
                    placeholder="Number of tokens to respond",
                    value=20,
                    label="Count")
        # Right column (narrower): assistant output.
        with gr.Column(scale=2):
            response = gr.Textbox(label="AI Response")
    with gr.Row():
        submit_button = gr.Button("Submit")
    # The diff omits the opening line of the inputs list; reconstructed from
    # the surrounding hunk. TODO(review): confirm this ordering matches the
    # parameter order of generate_response(image, audio, text, count) — as
    # written, text_input/response_count are passed first, which looks
    # inconsistent with that signature; verify against the full file.
    submit_button.click(generate_response,
                        inputs=[text_input, response_count,
                                image_input, audio_input],
                        outputs=response)


# share=True exposes a public tunnel link in addition to the local server.
demo.launch(share=True)