Update app.py
Browse files
app.py
CHANGED
|
@@ -37,11 +37,12 @@ this demo is governed by the original [license](https://github.com/meta-llama/ll
|
|
| 37 |
# DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
|
| 38 |
|
| 39 |
|
| 40 |
-
# Instantiate the chat model and its tokenizer, but only when a CUDA
# device is actually present — this demo cannot run on CPU.
if torch.cuda.is_available():
    model_id = "chuanli11/Llama-3.2-3B-Instruct-uncensored"
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",          # let accelerate place the weights
        torch_dtype=torch.bfloat16, # half-precision variant used by the demo
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 44 |
-
|
|
|
|
| 45 |
|
| 46 |
|
| 47 |
@spaces.GPU
|
|
|
|
| 37 |
# DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
|
| 38 |
|
| 39 |
|
| 40 |
+
# Decide whether a usable GPU environment exists: either a real CUDA device,
# or a ZeroGPU-style environment signalled via the ZERO_GPU_SUPPORT env var.
#
# BUG FIX: os.getenv returns a *string*, so the original
# `os.getenv("ZERO_GPU_SUPPORT", False)` was truthy even for
# ZERO_GPU_SUPPORT=0 / ZERO_GPU_SUPPORT=false. Treat the common false-y
# spellings as "off"; any other non-empty value still enables the path,
# so previously-working configurations keep working.
_zero_gpu = os.getenv("ZERO_GPU_SUPPORT", "").strip().lower() not in ("", "0", "false", "no", "off")

if torch.cuda.is_available() or _zero_gpu:
    model_id = "chuanli11/Llama-3.2-3B-Instruct-uncensored"
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",          # let accelerate place the weights
        torch_dtype=torch.bfloat16, # half-precision variant used by the demo
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)
else:
    # Fail fast with a clear message instead of crashing later on first use.
    raise RuntimeError("No compatible GPU environment found for this model.")
|
| 46 |
|
| 47 |
|
| 48 |
@spaces.GPU
|