Spaces: Running
Commit · 17bea6b
Parent(s): 24ff264
Model upgrade and GPU Support
Browse files
- .gradio/flagged/dataset2.csv +2 -0
- app.py +11 -4
.gradio/flagged/dataset2.csv ADDED
@@ -0,0 +1,2 @@
+How shall Codice Da Vinci help today?,Code Style,🧾 Leonardo's Work,timestamp
+"make a script to compute fibonacci numbers, no comments please",Clean & Pythonic,,2025-04-19 15:21:21.859616
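This CSV is the file Gradio's flagging callback appends to when a user flags an interaction: the first added row is the column header, the second a flagged example. As a minimal sketch of reading it back with the standard library, assuming the path and column names shown in the diff above:

import csv

with open(".gradio/flagged/dataset2.csv", newline="", encoding="utf-8") as f:
    for row in csv.DictReader(f):
        # Column names come from the header row the commit adds
        print(row["Code Style"], "->", row["How shall Codice Da Vinci help today?"])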
app.py CHANGED
@@ -3,17 +3,24 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
 # deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct
-model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
+# model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
+model_id = "deepseek-ai/deepseek-coder-6.7b-instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_id) # Or your own!
-model = AutoModelForCausalLM.from_pretrained(model_id,
-                                             device_map=None, torch_dtype=torch.float32)
+model = AutoModelForCausalLM.from_pretrained(model_id,
+                                             # device_map=None,
+                                             # torch_dtype=torch.float32,
+                                             device_map="auto",
+                                             torch_dtype=torch.float16,
+                                             trust_remote_code=True)
+# model.to("cpu")
 
 def generate_code(prompt, style="Clean & Pythonic"):
     if style == "Verbose like a 15th-century manuscript":
         prompt = "In a manner most detailed, write code that... " + prompt
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     outputs = model.generate(**inputs,
-                             max_new_tokens=100,
+                             # max_new_tokens=100,
+                             max_new_tokens=300,
                              do_sample=True,
                              temperature=1.0,
                              top_p=0.95,
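Taken together, the app.py changes swap the 1.3B model for the 6.7B one, load it in float16 spread across available GPUs via device_map="auto" (which relies on the accelerate package), and raise the generation budget from 100 to 300 new tokens. Below is a minimal sketch of the loading and generation path this commit lands on; the torch.cuda.is_available() fallback to the old CPU/float32 configuration is an illustrative addition, not part of the committed file:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "deepseek-ai/deepseek-coder-6.7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# device_map="auto" places the model on available GPUs via accelerate;
# float16 roughly halves memory versus the previous float32 CPU config.
has_gpu = torch.cuda.is_available()
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto" if has_gpu else None,
    torch_dtype=torch.float16 if has_gpu else torch.float32,
    trust_remote_code=True,
)

prompt = "make a script to compute fibonacci numbers, no comments please"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    max_new_tokens=300,  # raised from 100 so longer scripts are not truncated
    do_sample=True,
    temperature=1.0,
    top_p=0.95,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Keeping the old values as comments (# model_id = ..., # max_new_tokens=100) preserves an easy rollback path to the smaller CPU-friendly setup, matching the commented-out # model.to("cpu") escape hatch left in the file.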