ljcnju commited on
Commit
6a48872
·
verified ·
1 Parent(s): 78b5bb7

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -6
README.md CHANGED
@@ -35,8 +35,7 @@ This is the model card of a 🤗 transformers model that has been pushed on the
35
 
36
 ## Directly Uses
37
 
38
- ```
39
-
40
  from transformers import AutoTokenizer, AutoModelForCausalLM,pipeline
41
  from peft import PeftModelForCausalLM
42
  from transformers import BitsAndBytesConfig
@@ -55,13 +54,11 @@ prompt = "<|translate|> public void removePresentationFormat() {remove1stPropert
55
  input = tokenzier(prompt,return_tensors="pt")
56
  output_ids = model.generate(**input)
57
  print(tokenzier.batch_decode(output_ids))
58
-
59
  ```
60
 
61
  ### Use with vLLM
62
 
63
- ```
64
-
65
  from vllm import LLM, SamplingParams,EngineArgs, LLMEngine, RequestOutput
66
  from vllm.lora.request import LoRARequest
67
  engine_args = EngineArgs(model="deepseek-ai/deepseek-coder-6.7b-base",
@@ -91,7 +88,6 @@ while engine.has_unfinished_requests():
91
  for request_output in request_outputs:
92
  finished = finished | request_output.finished
93
  print(request_outputs[0].outputs[0].text)
94
-
95
  ```
96
 
97
  [More Information Needed]
 
35
 
36
 ## Directly Uses
37
 
38
+ ```python
 
39
  from transformers import AutoTokenizer, AutoModelForCausalLM,pipeline
40
  from peft import PeftModelForCausalLM
41
  from transformers import BitsAndBytesConfig
 
54
  input = tokenzier(prompt,return_tensors="pt")
55
  output_ids = model.generate(**input)
56
  print(tokenzier.batch_decode(output_ids))
 
57
  ```
58
 
59
  ### Use with vLLM
60
 
61
+ ```python
 
62
  from vllm import LLM, SamplingParams,EngineArgs, LLMEngine, RequestOutput
63
  from vllm.lora.request import LoRARequest
64
  engine_args = EngineArgs(model="deepseek-ai/deepseek-coder-6.7b-base",
 
88
  for request_output in request_outputs:
89
  finished = finished | request_output.finished
90
  print(request_outputs[0].outputs[0].text)
 
91
  ```
92
 
93
  [More Information Needed]