Akjava committed on
Commit 569ebbc · verified · 1 Parent(s): 2281ee6

Update app.py

Files changed (1): app.py (+4 -9)
app.py CHANGED
@@ -28,10 +28,6 @@ def init():
     histories = []
     #model = None
 
-    model = AutoModelForCausalLM.from_pretrained(
-        model_id, token=huggingface_token ,torch_dtype=dtype,device_map=device
-    )
-    text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer,torch_dtype=dtype,device_map=device ) #pipeline has not to(device)
 
     if next(model.parameters()).is_cuda:
         print("The model is on a GPU")
@@ -48,11 +44,10 @@ def init():
 
 @spaces.GPU(duration=120)
 def generate_text(messages):
-    # model = AutoModelForCausalLM.from_pretrained(
-    #     model_id, token=huggingface_token ,torch_dtype=dtype,device_map=device
-    # )
-
-    #text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer,torch_dtype=dtype,device_map=device) #pipeline has not to(device)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id, token=huggingface_token ,torch_dtype=dtype,device_map=device
+    )
+    text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer,torch_dtype=dtype,device_map=device ) #pipeline has not to(device)
     result = text_generator(messages, max_new_tokens=256, do_sample=True, temperature=0.7)
 
     generated_output = result[0]["generated_text"]
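
For context, a minimal self-contained sketch of generate_text after this commit, with the model and pipeline loaded inside the @spaces.GPU-decorated function (the decorator is Hugging Face Spaces' ZeroGPU entry point). The model_id, huggingface_token, dtype, and device values below are hypothetical placeholders for app.py's actual module-level definitions, which are not shown in this diff:

import torch
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Placeholders; app.py defines its own values for these at module level.
model_id = "google/gemma-1.1-2b-it"  # hypothetical model id
huggingface_token = None             # app.py passes a real token here
dtype = torch.bfloat16
device = "cuda"

tokenizer = AutoTokenizer.from_pretrained(model_id, token=huggingface_token)

@spaces.GPU(duration=120)
def generate_text(messages):
    # Loading inside the decorated function keeps the GPU-dependent work
    # within the allocation window the decorator provides, rather than at
    # import/init time when no GPU is attached.
    model = AutoModelForCausalLM.from_pretrained(
        model_id, token=huggingface_token, torch_dtype=dtype, device_map=device
    )
    # The pipeline object has no .to(device); placement is handled via device_map.
    text_generator = pipeline(
        "text-generation", model=model, tokenizer=tokenizer,
        torch_dtype=dtype, device_map=device,
    )
    result = text_generator(messages, max_new_tokens=256, do_sample=True, temperature=0.7)
    return result[0]["generated_text"]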