admin committed on
Commit
2c32302
·
1 Parent(s): 2e5fa10

add cpu branch

Browse files
Files changed (1) hide show
  1. app.py +9 -2
app.py CHANGED
@@ -28,6 +28,11 @@ def predict(
28
 
29
  instruction += f"<|im_start|>user\n{message}\n<|im_end|>\n<|im_start|>assistant\n"
30
  try:
 
 
 
 
 
31
  streamer = TextIteratorStreamer(
32
  tokenizer,
33
  skip_prompt=True,
@@ -67,8 +72,10 @@ def predict(
67
 
68
  if __name__ == "__main__":
69
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
70
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
71
- model = AutoModelForCausalLM.from_pretrained(MODEL_ID, device_map="auto")
 
 
72
  # Create Gradio interface
73
  gr.ChatInterface(
74
  predict,
 
28
 
29
  instruction += f"<|im_start|>user\n{message}\n<|im_end|>\n<|im_start|>assistant\n"
30
  try:
31
+ if device == torch.device("cpu"):
32
+ raise EnvironmentError(
33
+ "If you have computing power, you can test by cloning to local or forking to an account with purchased GPU environment"
34
+ )
35
+
36
  streamer = TextIteratorStreamer(
37
  tokenizer,
38
  skip_prompt=True,
 
72
 
73
  if __name__ == "__main__":
74
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
75
+ if device == torch.device("cuda"):
76
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
77
+ model = AutoModelForCausalLM.from_pretrained(MODEL_ID, device_map="auto")
78
+
79
  # Create Gradio interface
80
  gr.ChatInterface(
81
  predict,