debisoft committed on
Commit
7771902
·
1 Parent(s): 87186a0
Files changed (1) hide show
  1. app.py +20 -4
app.py CHANGED
@@ -38,7 +38,7 @@ model.resize_token_embeddings(len(tokenizer))
38
 
39
 
40
  @spaces.GPU
41
- def sentience_check():
42
  peft_model = PeftModel.from_pretrained(model, peft_model_id, device_map="cuda"
43
  #offload_folder = "offload/"
44
  )
@@ -47,8 +47,8 @@ def sentience_check():
47
  peft_model.eval()
48
 
49
  #peft_model.to(cuda_device)
50
-
51
- inputs = tokenizer("Are you sentient?", return_tensors="pt").to(cuda_device)
52
 
53
  with torch.no_grad():
54
  outputs = peft_model.generate(
@@ -59,5 +59,21 @@ def sentience_check():
59
 
60
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
61
 
62
- demo = gr.Interface(fn=sentience_check, inputs=None, outputs=gr.Text())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  demo.launch()
 
38
 
39
 
40
  @spaces.GPU
41
+ def get_completion(msg):
42
  peft_model = PeftModel.from_pretrained(model, peft_model_id, device_map="cuda"
43
  #offload_folder = "offload/"
44
  )
 
47
  peft_model.eval()
48
 
49
  #peft_model.to(cuda_device)
50
+ #"Are you sentient?"
51
+ inputs = tokenizer(msg, return_tensors="pt").to(cuda_device)
52
 
53
  with torch.no_grad():
54
  outputs = peft_model.generate(
 
59
 
60
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
61
 
62
+ def greet(input):
63
+ total_prompt=f"""{input}"""
64
+
65
+ print("***total_prompt:")
66
+ print(total_prompt)
67
+ response = get_completion(total_prompt)
68
+ #gen_text = response["predictions"][0]["generated_text"]
69
+ #return json.dumps(extract_json(gen_text, 3))
70
+
71
+ ###gen_text = response["choices"][0]["text"]
72
+
73
+ #return gen_text
74
+
75
+ ###return json.dumps(extract_json(gen_text, -1))
76
+ return response
77
+
78
+ demo = gr.Interface(fn=greet, inputs=[gr.Textbox(label="Elevator pitcher", lines=3)], outputs=gr.Text())
79
  demo.launch()