sayhan committed on
Commit
fcf0f40
·
verified ·
1 Parent(s): e87862c

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +37 -1
README.md CHANGED
@@ -6,4 +6,40 @@ language:
6
  - tr
7
  tags:
8
  - axolotl
9
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  - tr
7
  tags:
8
  - axolotl
9
+ ---
10
+ # Qwen1.5-7B-turkish-lora
11
+ ## Kullanım
12
+ ```python
13
+ from transformers import AutoModelForCausalLM, AutoTokenizer
14
+ device = "cuda" # the device to load the model onto
15
+
16
+ model = AutoModelForCausalLM.from_pretrained(
17
+ "sayhan/Qwen1.5-7B-turkish-lora",
18
+ device_map="auto"
19
+ )
20
+
21
+ tokenizer = AutoTokenizer.from_pretrained("sayhan/Qwen1.5-7B-turkish-lora")
22
+
23
+ prompt = "DNA'nın ne olduğunu kısaca açıkla." # "İsteminizi buraya girin"
24
+ messages = [
25
+ {"role": "system", "content": "Sen yardımsever bir asistansın."},
26
+ {"role": "user", "content": prompt}
27
+ ]
28
+ text = tokenizer.apply_chat_template(
29
+ messages,
30
+ tokenize=False,
31
+ add_generation_prompt=True
32
+ )
33
+ model_inputs = tokenizer([text], return_tensors="pt").to(device)
34
+
35
+ generated_ids = model.generate(
36
+ model_inputs.input_ids,
37
+ max_new_tokens=512
38
+ )
39
+ generated_ids = [
40
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
41
+ ]
42
+
43
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
44
+ print(response) # Cevabı görüntüleyin
45
+ ```