pdarleyjr committed · Commit 3fb9b0c · 1 Parent(s): 79a19e9

Optimize Gradio configuration for better performance and reliability

Files changed (1)
app.py  +4 -9
app.py CHANGED
@@ -1,9 +1,6 @@
 import gradio as gr
 from transformers import T5Tokenizer, T5ForConditionalGeneration
-from fastapi import FastAPI
-from fastapi.middleware.cors import CORSMiddleware
 
-# Initialize Gradio interface directly without FastAPI
 # Load the base T5 model and tokenizer
 model = T5ForConditionalGeneration.from_pretrained('t5-small')
 tokenizer = T5Tokenizer.from_pretrained('t5-small')
@@ -65,13 +62,11 @@ demo = gr.Interface(
 # Enable queue
 demo.queue()
 
-# Launch the app
+# Launch the app with optimized configuration for Hugging Face Spaces
 if __name__ == "__main__":
+    demo.queue(concurrency_count=1, max_size=20)  # Configure queue for better performance
     demo.launch(
-        share=True,
         server_name="0.0.0.0",
-        server_port=7860,
-        allowed_paths=[],
-        show_error=True,
-        cors_allowed_origins=["https://pdarleyjr.github.io"]
+        share=True,
+        show_error=True
     )
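For context, here is a minimal sketch of how app.py reads after this commit, reconstructed from the diff above. The generate function and the gr.Interface construction are placeholders (the diff only shows the interface's opening line), and the queue keyword arguments assume Gradio 3.x, where queue() accepts concurrency_count and max_size; Gradio 4+ replaced concurrency_count with default_concurrency_limit.

import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load the base T5 model and tokenizer
model = T5ForConditionalGeneration.from_pretrained('t5-small')
tokenizer = T5Tokenizer.from_pretrained('t5-small')

def generate(prompt: str) -> str:
    # Placeholder for the Space's actual generation function (not shown in the diff)
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=64)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Placeholder interface; the real one is defined earlier in app.py
demo = gr.Interface(fn=generate, inputs="text", outputs="text")

# Enable queue
demo.queue()

# Launch the app with optimized configuration for Hugging Face Spaces
if __name__ == "__main__":
    # The second queue() call reconfigures the queue with explicit limits (Gradio 3.x API)
    demo.queue(concurrency_count=1, max_size=20)
    demo.launch(
        server_name="0.0.0.0",  # bind to all interfaces so the Space's proxy can reach the app
        share=True,
        show_error=True
    )

Dropping server_port=7860 is harmless because 7860 is already Gradio's default port and the port Hugging Face Spaces expects.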