Update app.py
app.py CHANGED
@@ -17,41 +17,39 @@ if not huggingface_token:
     print("no HUGGINGFACE_TOKEN if you need set secret ")
     #raise ValueError("HUGGINGFACE_TOKEN environment variable is not set")
 
-def init():
-    global text_generator
-
-
-
-    #model_id = "microsoft/Phi-3-mini-128k-instruct"
-
-
-
-    tokenizer = AutoTokenizer.from_pretrained(model_id, token=huggingface_token)
-
-    print(model_id,device,dtype)
-    histories = []
-    #model = None
 
 
 
-    if not is_hugging_face:
-        model = AutoModelForCausalLM.from_pretrained(
-            model_id, token=huggingface_token ,torch_dtype=dtype,device_map=device
-        )
-        text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer,torch_dtype=dtype,device_map=device ) #pipeline has not to(device)
-
-        if next(model.parameters()).is_cuda:
-            print("The model is on a GPU")
-        else:
-            print("The model is on a CPU")
 
-
-
-
-
-
+
+
+
+tokenizer = AutoTokenizer.from_pretrained(model_id, token=huggingface_token)
+
+print(model_id,device,dtype)
+histories = []
+#model = None
+
+
+
+if not is_hugging_face:
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id, token=huggingface_token ,torch_dtype=dtype,device_map=device
+    )
+    text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer,torch_dtype=dtype,device_map=device ) #pipeline has not to(device)
 
-
+    if next(model.parameters()).is_cuda:
+        print("The model is on a GPU")
+    else:
+        print("The model is on a CPU")
+
+    #print(f"text_generator.device='{text_generator.device}")
+    if str(text_generator.device).strip() == 'cuda':
+        print("The pipeline is using a GPU")
+    else:
+        print("The pipeline is using a CPU")
+
+print("initialized")
 
 @spaces.GPU(duration=120)
 def generate_text(messages):
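Note: this commit dissolves the init() helper so the tokenizer, model, and pipeline are created once at module import; only the @spaces.GPU(duration=120)-decorated generate_text borrows a GPU per call. Below is a minimal self-contained sketch of that import-time pattern, assuming model_id, device, and dtype are set roughly as the parts of app.py outside this hunk appear to set them (the concrete values are illustrative, not the Space's):

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

huggingface_token = os.getenv("HUGGINGFACE_TOKEN")  # None is acceptable for public models
model_id = "microsoft/Phi-3-mini-128k-instruct"     # assumed: the hunk shows this id only commented out
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16 if device == "cuda" else torch.float32

# Created once at import time, not per request.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=huggingface_token)
model = AutoModelForCausalLM.from_pretrained(
    model_id, token=huggingface_token, torch_dtype=dtype, device_map=device
)
# device_map above already placed the model, so the pipeline only wraps it.
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Placement checks equivalent to the ones this commit adds.
print("model on GPU" if next(model.parameters()).is_cuda else "model on CPU")
print(f"pipeline device: {text_generator.device}")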
@@ -90,7 +88,6 @@ def call_generate_text(message, history):
     return ""
 
 demo = gr.ChatInterface(call_generate_text,type="messages")
-
+
 if __name__ == "__main__":
-
     demo.launch(share=True)
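Note: the second hunk only tightens the blank lines around the __main__ guard. For reference, a minimal runnable sketch of the gr.ChatInterface wiring used here, with a stub standing in for the real call_generate_text (assumption: the real callback delegates to generate_text and the pipeline):

import gradio as gr

def call_generate_text(message, history):
    # With type="messages", history arrives as a list of
    # {"role": ..., "content": ...} dicts; returning a string
    # appends an assistant message to the chat.
    return f"echo: {message}"

demo = gr.ChatInterface(call_generate_text, type="messages")

if __name__ == "__main__":
    demo.launch(share=True)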