hafidhsoekma committed
Commit 8859b97 · Parent: 5dea472

Update README.md

Files changed (1)
  1. README.md +20 -15
README.md CHANGED

@@ -12,7 +12,6 @@ tags:
 - uncensored
 - instruct
 - alpaca
-pipeline_tag: text-generation
 ---
 # DukunLM - Indonesian Language Model 🧙‍♂️
 
@@ -45,11 +44,11 @@ pip install -U einops==0.6.1
 
 ```python
 import torch
-from peft import PeftModel
-from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextStreamer
+from peft import AutoPeftModelForCausalLM
+from transformers import AutoTokenizer, BitsAndBytesConfig, TextStreamer
 
-model = AutoModelForCausalLM.from_pretrained(
-    "nferroukhi/WizardLM-Uncensored-Falcon-7b-sharded-bf16",
+model = AutoPeftModelForCausalLM.from_pretrained(
+    "azale-ai/DukunLM-Uncensored-7B",
     load_in_4bit=True,
     torch_dtype=torch.float32,
     trust_remote_code=True,
@@ -62,8 +61,11 @@ model = AutoModelForCausalLM.from_pretrained(
         bnb_4bit_quant_type="nf4",
     )
 )
-model = PeftModel.from_pretrained(model, "azale-ai/DukunLM-Uncensored-7B")
-tokenizer = AutoTokenizer.from_pretrained("azale-ai/DukunLM-Uncensored-7B")
+tokenizer = AutoTokenizer.from_pretrained(
+    "azale-ai/DukunLM-Uncensored-7B",
+    use_fast=False,
+    padding_side="right"
+)
 streamer = TextStreamer(tokenizer)
 
 instruction_prompt = "Jelaskan mengapa air penting bagi kehidupan manusia."
@@ -105,11 +107,11 @@ _ = model.generate(
 
 ```python
 import torch
-from peft import PeftModel
-from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+from peft import AutoPeftModelForCausalLM
+from transformers import AutoTokenizer, BitsAndBytesConfig
 
-model = AutoModelForCausalLM.from_pretrained(
-    "nferroukhi/WizardLM-Uncensored-Falcon-7b-sharded-bf16",
+model = AutoPeftModelForCausalLM.from_pretrained(
+    "azale-ai/DukunLM-Uncensored-7B",
     load_in_4bit=True,
     torch_dtype=torch.float32,
     trust_remote_code=True,
@@ -122,8 +124,11 @@ model = AutoModelForCausalLM.from_pretrained(
         bnb_4bit_quant_type="nf4",
     )
 )
-model = PeftModel.from_pretrained(model, "azale-ai/DukunLM-Uncensored-7B")
-tokenizer = AutoTokenizer.from_pretrained("azale-ai/DukunLM-Uncensored-7B")
+tokenizer = AutoTokenizer.from_pretrained(
+    "azale-ai/DukunLM-Uncensored-7B",
+    use_fast=False,
+    padding_side="right"
+)
 
 instruction_prompt = "Bangun dialog chatbot untuk layanan pelanggan yang ingin membantu pelanggan memesan produk tertentu."
 input_prompt = "Produk: Sepatu Nike Air Max"
@@ -149,11 +154,11 @@ else:
 """
 
 inputs = tokenizer(text, return_tensors="pt").to("cuda")
-outputs = model.generate(
+_ = model.generate(
     inputs=inputs.input_ids,
     pad_token_id=tokenizer.pad_token_id,
     eos_token_id=tokenizer.eos_token_id,
-    max_length=512, use_cache=True,
+    max_length=2048, use_cache=True,
     temperature=0.7, do_sample=True,
     top_k=4, top_p=0.95
 )
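For reference, here is the first usage example (the streaming one) as it reads after this commit, assembled from the hunks above. This is a sketch rather than verbatim README content: the diff elides the lines between hunks, so the full `BitsAndBytesConfig` argument list, the `streamer=streamer` argument to `generate`, and the Alpaca-style prompt template are assumptions inferred from the visible context and the repo's `alpaca` tag.

```python
# Sketch of the post-commit streaming example; elided parts are assumed.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer, BitsAndBytesConfig, TextStreamer

# One-step load: AutoPeftModelForCausalLM resolves the base model from the
# adapter config, replacing the old AutoModelForCausalLM + PeftModel pair.
model = AutoPeftModelForCausalLM.from_pretrained(
    "azale-ai/DukunLM-Uncensored-7B",
    load_in_4bit=True,
    torch_dtype=torch.float32,
    trust_remote_code=True,
    quantization_config=BitsAndBytesConfig(  # only nf4 is visible in the diff;
        load_in_4bit=True,                   # the other fields are assumptions
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4",
    ),
)
tokenizer = AutoTokenizer.from_pretrained(
    "azale-ai/DukunLM-Uncensored-7B",
    use_fast=False,
    padding_side="right",
)
streamer = TextStreamer(tokenizer)

# "Explain why water is important for human life."
instruction_prompt = "Jelaskan mengapa air penting bagi kehidupan manusia."

# Alpaca-style template (assumed; the template itself sits between hunks).
text = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{instruction_prompt}

### Response:
"""

inputs = tokenizer(text, return_tensors="pt").to("cuda")
_ = model.generate(
    inputs=inputs.input_ids,
    streamer=streamer,  # assumed: implied by TextStreamer above, elided in the diff
    pad_token_id=tokenizer.pad_token_id,
    eos_token_id=tokenizer.eos_token_id,
    max_length=2048, use_cache=True,
    temperature=0.7, do_sample=True,
    top_k=4, top_p=0.95,
)
```

The design point of the switch: `AutoPeftModelForCausalLM` reads `base_model_name_or_path` from the adapter's config, so the hard-coded `nferroukhi/WizardLM-Uncensored-Falcon-7b-sharded-bf16` base no longer needs to appear in user code.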
 
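The second example (instruction plus input, no streamer) reads as below after the commit, under the same caveat: the two-branch prompt template is elided between hunks, so a standard Alpaca-style template is assumed to match the visible `else:` and closing `"""` context.

```python
# Continues from the model and tokenizer loaded as in the sketch above.

# "Build a chatbot dialogue for customer service that helps a customer order a particular product."
instruction_prompt = "Bangun dialog chatbot untuk layanan pelanggan yang ingin membantu pelanggan memesan produk tertentu."
# "Product: Nike Air Max shoes"
input_prompt = "Produk: Sepatu Nike Air Max"

# Two-branch Alpaca-style template (assumed; only `else:` is visible in the diff).
if input_prompt:
    text = f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{instruction_prompt}

### Input:
{input_prompt}

### Response:
"""
else:
    text = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{instruction_prompt}

### Response:
"""

inputs = tokenizer(text, return_tensors="pt").to("cuda")
# The commit renames `outputs` to `_` and raises max_length from 512 to 2048.
_ = model.generate(
    inputs=inputs.input_ids,
    pad_token_id=tokenizer.pad_token_id,
    eos_token_id=tokenizer.eos_token_id,
    max_length=2048, use_cache=True,
    temperature=0.7, do_sample=True,
    top_k=4, top_p=0.95
)
```

Since the return value is discarded here, nothing is printed; to inspect the completion, assign the result (e.g. `outputs = model.generate(...)`) and decode it with `tokenizer.decode(outputs[0], skip_special_tokens=True)`.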