Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -4,24 +4,27 @@ import torch
|
|
4 |
|
5 |
model_id = "deepseek-ai/deepseek-coder-7b-base"
|
6 |
|
7 |
-
|
8 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
9 |
model = AutoModelForCausalLM.from_pretrained(
|
10 |
model_id,
|
11 |
-
|
12 |
-
|
13 |
)
|
14 |
|
|
|
15 |
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
16 |
|
17 |
-
|
18 |
-
|
19 |
-
|
|
|
20 |
|
|
|
21 |
gr.Interface(
|
22 |
-
fn=
|
23 |
-
inputs=gr.Textbox(lines=
|
24 |
outputs="text",
|
25 |
-
title="🧠 DeepSeek Coder R1
|
26 |
-
description="
|
27 |
).launch()
|
|
|
# Model checkpoint to serve (DeepSeek Coder 7B, base variant).
model_id = "deepseek-ai/deepseek-coder-7b-base"

# Load tokenizer and model weights once at process startup.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # half precision to halve memory footprint
    device_map="auto",          # let accelerate place layers on available devices
)

# Text-generation pipeline wrapping the loaded model + tokenizer.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Define chat logic
def chat(prompt, max_new_tokens=200, temperature=0.7):
    """Generate a completion for *prompt* with the module-level pipeline.

    Args:
        prompt: User input text passed straight to the pipeline.
        max_new_tokens: Generation length cap (default matches the
            previously hard-coded 200, so existing callers are unaffected).
        temperature: Sampling temperature (default matches the previously
            hard-coded 0.7).

    Returns:
        The generated text string. NOTE(review): with this pipeline the
        returned text typically includes the prompt itself — confirm if
        prompt-stripping is desired.
    """
    output = pipe(
        prompt,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
    )
    return output[0]["generated_text"]
# Gradio UI: one textbox in, generated text out.
demo = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=2, placeholder="Ask DeepSeek 7B..."),
    outputs="text",
    title="🧠 DeepSeek Coder R1 7B Chat",
    description="7B open source code model powered by DeepSeek",
)
demo.launch()