Akjava committed on
Commit
ec66efc
·
verified ·
1 Parent(s): 0c10f58

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -1
app.py CHANGED
@@ -28,7 +28,17 @@ model = AutoModelForCausalLM.from_pretrained(
28
  model_id, token=huggingface_token ,torch_dtype=dtype,device_map=device
29
  )
30
  text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer,torch_dtype=dtype,device_map=device) #pipeline has not to(device)
31
-
 
 
 
 
 
 
 
 
 
 
32
 
33
  @spaces.GPU(duration=120)
34
  def generate_text(messages):
 
28
  model_id, token=huggingface_token ,torch_dtype=dtype,device_map=device
29
  )
30
  text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer,torch_dtype=dtype,device_map=device) #pipeline has not to(device)
31
+
32
+ if next(model.parameters()).is_cuda:
33
+ print("The model is on a GPU")
34
+ else:
35
+ print("The model is on a CPU")
36
+
37
+ # Assuming 'text_generator' is your initialized pipeline
38
+ if text_generator.model.is_cuda:
39
+ print("The pipeline is using a GPU")
40
+ else:
41
+ print("The pipeline is using a CPU")
42
 
43
  @spaces.GPU(duration=120)
44
  def generate_text(messages):