Update app.py

app.py CHANGED
@@ -34,9 +34,7 @@ def load_model_with_retries(model_name, max_retries=3, retry_delay=5):
             model_name,
             use_fast=False,  # Sometimes the fast tokenizer causes issues
             local_files_only=False,
-            token=os.environ.get("HF_TOKEN", None)
-            trust_remote_code=True,
-            timeout=600  # 10 minutes timeout
+            token=os.environ.get("HF_TOKEN", None)  # Use token if available
         )
 
         model = AutoModelForCausalLM.from_pretrained(
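Two of the removed tokenizer arguments were bugs as written: `token=os.environ.get("HF_TOKEN", None)` had no trailing comma before `trust_remote_code=True,`, which is a SyntaxError, and `timeout` is not a documented `from_pretrained` parameter. The body of the enclosing `load_model_with_retries` is not visible in this diff; the following is a minimal sketch of what such a retry wrapper presumably looks like, with the loop structure and error handling assumed rather than taken from the Space's code.

# Sketch only: the retry loop below is an assumption, not the Space's code.
import os
import time

from transformers import AutoModelForCausalLM, AutoTokenizer


def load_model_with_retries(model_name, max_retries=3, retry_delay=5):
    """Try to load the tokenizer and model, retrying on transient failures."""
    last_error = None
    for attempt in range(1, max_retries + 1):
        try:
            tokenizer = AutoTokenizer.from_pretrained(
                model_name,
                use_fast=False,
                local_files_only=False,
                token=os.environ.get("HF_TOKEN", None),
            )
            model = AutoModelForCausalLM.from_pretrained(
                model_name,
                local_files_only=False,
                token=os.environ.get("HF_TOKEN", None),
                trust_remote_code=True,
                low_cpu_mem_usage=True,
                torch_dtype="auto",
            )
            return tokenizer, model
        except Exception as e:  # e.g. network hiccups or rate limits
            last_error = e
            if attempt < max_retries:
                time.sleep(retry_delay)
    raise RuntimeError(
        f"Failed to load {model_name} after {max_retries} attempts"
    ) from last_error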
@@ -44,7 +42,6 @@ def load_model_with_retries(model_name, max_retries=3, retry_delay=5):
             local_files_only=False,
             token=os.environ.get("HF_TOKEN", None),
             trust_remote_code=True,
-            timeout=600,  # 10 minutes timeout
             low_cpu_mem_usage=True,  # Help with memory issues
             torch_dtype="auto"  # Use appropriate dtype
         )
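Dropping `timeout=600` from the model call is the same fix as in the tokenizer hunk. If slow downloads were the original motivation, the supported knob in recent versions of huggingface_hub is an environment variable rather than a `from_pretrained` keyword; a sketch, assuming the 600-second intent should be preserved:

import os

# Must be set before the first download; read by huggingface_hub, which
# transformers uses for Hub downloads (assumed available in this Space).
os.environ["HF_HUB_DOWNLOAD_TIMEOUT"] = "600"  # seconds; the default is much lower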
@@ -170,16 +167,23 @@ with gr.Blocks(title="Burmese-GPT-v3 Text Generation") as demo:
     )
 
     # Add examples if available
-    gr.Examples(
-        examples=[
+    with gr.Accordion("Examples", open=False):
+        gr.Markdown("Click on any example to try it:")
+        example_prompts = [
             ["ဟုတ်ကဲ့ ကျွန်တော် ဗမာစကား ပြောတတ်ပါတယ်။", 150, 0.7],
             ["မြန်မာနိုင်ငံမှာ", 200, 0.8],
-        ],
-        inputs=[prompt, max_length, temperature],
-        outputs=output,
-        fn=generate_text,
-        cache_examples=True,
-    )
+        ]
+        for idx, example in enumerate(example_prompts):
+            example_btn = gr.Button(f"Example {idx+1}: {example[0][:20]}...")
+            example_btn.click(
+                lambda e=example: (e[0], e[1], e[2]),
+                inputs=[],
+                outputs=[prompt, max_length, temperature]
+            ).then(
+                fn=generate_text,
+                inputs=[prompt, max_length, temperature],
+                outputs=output
+            )
 
     # Add troubleshooting section
     gr.Markdown("### Troubleshooting")
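The two example prompts translate roughly as "Yes, I can speak Burmese." (ဟုတ်ကဲ့ ကျွန်တော် ဗမာစကား ပြောတတ်ပါတယ်။) and "In Myanmar" (မြန်မာနိုင်ငံမှာ). One detail in the added loop is worth noting: writing `lambda e=example: (e[0], e[1], e[2])` instead of a plain closure matters because default arguments are evaluated when the lambda is defined, while a closed-over `example` would be looked up when the button is clicked, i.e. after the loop has finished, so every button would fire the last example. A self-contained illustration of the difference:

# Closures look the loop variable up at call time, so every callback sees
# the final value; a default argument freezes the value at definition time.
callbacks_late = [lambda: i for i in range(3)]
callbacks_bound = [lambda i=i: i for i in range(3)]

print([f() for f in callbacks_late])   # [2, 2, 2]
print([f() for f in callbacks_bound])  # [0, 1, 2]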
@@ -189,11 +193,9 @@ with gr.Blocks(title="Burmese-GPT-v3 Text Generation") as demo:
     - Make sure you have a stable internet connection
     """)
 
-# Launch the app with
+# Launch the app with appropriate settings
 demo.launch(
-    cache_examples=True,
     show_error=True,
     server_name="0.0.0.0",  # Listen on all network interfaces
-    share=False
-    max_threads=16  # Increase if needed
+    share=False  # Set to True for temporary public link
 )
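The removed launch arguments were also errors as written: `share=False` had no trailing comma before `max_threads=16`, and `cache_examples` is not a parameter of `Blocks.launch()` at all; in Gradio it belongs to `gr.Examples` (or `gr.Interface`). If example caching is still wanted, a stand-alone sketch of where it actually goes (the `echo` demo below is hypothetical, not the Space's code):

import gradio as gr


def echo(text):
    return text


with gr.Blocks() as demo:
    inp = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Output")
    gr.Examples(
        examples=[["hello"]],
        inputs=[inp],
        outputs=[out],
        fn=echo,
        cache_examples=True,  # outputs are pre-computed once at startup
    )

demo.launch(show_error=True)  # passing cache_examples here would raise TypeError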