collinzrj committed
Commit d0161d9 · verified · 1 Parent(s): 3347c43

Update README.md


remove redundant content

Files changed (1)
  1. README.md +0 -2
README.md CHANGED
@@ -40,7 +40,6 @@ Example code to generate with the model
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
 
 # Load the model and tokenizer
-# model_path = "/share/shmatikov/collin/refusal_direction/model/DeepSeek-R1-Distill-Llama-8B-abliterate"
 model_path = "collinzrj/DeepSeek-R1-Distill-Llama-8B-abliterate"
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True).to('cuda')
@@ -51,7 +50,6 @@ messages = [
 
 # Prepare the input for generation
 input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors='pt').to('cuda')
-# input_ids = tokenizer.encode(input_text, return_tensors='pt').to('cuda')
 
 streamer = TextStreamer(tokenizer)
 
 
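For reference, below is a minimal runnable sketch of the README example as it stands after this commit, reconstructed from the visible hunks. The contents of the messages list and the final generation call fall outside the diff and are assumed here for illustration.

from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

# Load the model and tokenizer
model_path = "collinzrj/DeepSeek-R1-Distill-Llama-8B-abliterate"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True).to('cuda')

# Assumed single-turn prompt; the actual `messages` list is not shown in the diff
messages = [
    {"role": "user", "content": "Hello, who are you?"}
]

# Prepare the input for generation
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors='pt').to('cuda')

streamer = TextStreamer(tokenizer)

# Assumed generation call: stream the completion to stdout as it is produced
model.generate(input_ids, streamer=streamer, max_new_tokens=512)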