omeryentur committed on
Commit
36ac785
·
verified ·
1 Parent(s): 97266bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -15
app.py CHANGED
@@ -12,21 +12,9 @@ model_name = "google/gemma-2-2b-it"
12
  lora_model_name="Anlam-Lab/gemma-2-2b-it-anlamlab-SA-Chatgpt4mini"
13
 
14
 
15
- device = "cuda" if torch.cuda.is_available() else "cpu"
16
- @torch.no_grad()
17
- def load_model():
18
- tokenizer = AutoTokenizer.from_pretrained(model_name)
19
- model = AutoModelForCausalLM.from_pretrained(
20
- model_name,
21
- device_map=device,
22
- torch_dtype=torch.float16 if device == "cuda" else torch.float32,
23
- low_cpu_mem_usage=True
24
- )
25
- model = PeftModel.from_pretrained(model, lora_model_name)
26
- model.eval()
27
- return model, tokenizer
28
-
29
- model, tokenizer = load_model()
30
 
31
  def generate_response(text):
32
  example = f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|>Bir duygu analisti olarak sana verilen metinleri analiz et ve aşağıdaki kategorilerden yalnızca birini seçerek metnin duygu durumunu belirle:Positive,Negative,Neutral<|eot_id|><|start_header_id|>user<|end_header_id|>{text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""
 
12
  lora_model_name="Anlam-Lab/gemma-2-2b-it-anlamlab-SA-Chatgpt4mini"
13
 
14
 
15
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
16
+ model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cpu", torch_dtype=torch.float16)
17
+ model = PeftModel.from_pretrained(model, lora_model_name)
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
  def generate_response(text):
20
  example = f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|>Bir duygu analisti olarak sana verilen metinleri analiz et ve aşağıdaki kategorilerden yalnızca birini seçerek metnin duygu durumunu belirle:Positive,Negative,Neutral<|eot_id|><|start_header_id|>user<|end_header_id|>{text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""