Tech-Meld committed on
Commit
59fee4a
·
verified ·
1 Parent(s): 93ef414

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -0
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient, Repository
3
  import json
 
4
 
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
@@ -77,4 +78,9 @@ demo = gr.ChatInterface(
77
 
78
 
79
  if __name__ == "__main__":
 
 
 
 
 
80
  demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient, Repository
3
  import json
4
+ import torch
5
 
6
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
7
 
 
78
 
79
 
80
if __name__ == "__main__":
    # Report local CUDA availability before starting the UI.
    # NOTE(review): this check is informational only — the app sends requests
    # through a remote InferenceClient, so local GPU presence does not change
    # where inference actually runs; the printed messages overstate this.
    # Confirm intent with the author before relying on them.
    if torch.cuda.is_available():
        print("CUDA GPU is installed. Running inference on GPU.")
    else:
        print("CUDA GPU is not installed. Running inference on CPU.")
    # Start the Gradio app (blocks until the server is stopped).
    demo.launch()