Lyte committed
Commit 45e65a9 · verified · 1 parent: 55e00d3

Upload 4 files

Files changed (4)
  1. Dockerfile +42 -0
  2. README.md +15 -12
  3. app.py +201 -0
  4. start.sh +17 -0
Dockerfile ADDED
@@ -0,0 +1,42 @@
+ FROM ubuntu:22.04
+
+ # Install system dependencies
+ RUN apt-get update && \
+     apt-get install -y \
+     build-essential \
+     libssl-dev \
+     zlib1g-dev \
+     libboost-system-dev \
+     libboost-filesystem-dev \
+     cmake \
+     git \
+     python3-pip \
+     curl \
+     wget && \
+     rm -rf /var/lib/apt/lists/*
+
+ # Install Python dependencies
+ RUN pip3 install huggingface-hub openai gradio
+
+ # Build llama.cpp
+ RUN git clone https://github.com/ggerganov/llama.cpp && \
+     cd llama.cpp && \
+     mkdir build && \
+     cd build && \
+     cmake .. -DLLAMA_BUILD_SERVER=ON -DLLAMA_BUILD_EXAMPLES=ON -DCMAKE_BUILD_TYPE=Release && \
+     cmake --build . --config Release --target llama-server -j $(nproc)
+
+ # Download model
+ RUN mkdir -p /models && \
+     wget -O /models/model.q2_k_l.gguf https://huggingface.co/unsloth/DeepSeek-R1-Distill-Qwen-32B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-32B-Q2_K_L.gguf
+
+ # Copy app and startup script
+ COPY app.py /app.py
+ COPY start.sh /start.sh
+ RUN chmod +x /start.sh
+
+ # Expose ports
+ EXPOSE 7860 8080
+
+ # Start services
+ CMD ["/start.sh"]
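With the image built and a container running (ports 7860 and 8080 published), the llama-server endpoint can be smoke-tested independently of the Gradio UI. A minimal sketch using the same openai client the image installs; the prompt and token limit here are arbitrary:

# Smoke test for the llama-server OpenAI-compatible endpoint.
# Assumes the container runs locally with port 8080 published.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8080/v1", api_key="no-key-required")

resp = client.chat.completions.create(
    model="",  # llama-server serves a single model; the name is ignored
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(resp.choices[0].message.content)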
README.md CHANGED
@@ -1,12 +1,15 @@
- ---
- title: DeepSeek R1 Distill Qwen 32B Demo GGUF
- emoji: 🌍
- colorFrom: gray
- colorTo: blue
- sdk: gradio
- sdk_version: 5.13.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: DeepSeek R1 Distill Qwen 32B Q2_K_L Demo
+ emoji: 📈
+ colorFrom: purple
+ colorTo: red
+ sdk: docker
+ sdk_version: 5.13.0
+ app_file: app.py
+ app_port: 7860
+ pinned: false
+ license: mit
+ thumbnail: >-
+   https://cdn-uploads.huggingface.co/production/uploads/62f847d692950415b63c6011/KdOkloioovSNbYhV2BO5v.png
+ short_description: DeepSeek R1 Distill Qwen 32B Demo GGUF (Q2_K_L), fully on CPU
+ ---
app.py ADDED
@@ -0,0 +1,201 @@
+ import time
+ import gradio as gr
+ from openai import OpenAI
+
+ DESCRIPTION = '''
+ # DeepSeek-R1 Distill Qwen-32B Demo
+ A reasoning model trained using RL (Reinforcement Learning) that demonstrates structured reasoning capabilities.
+ '''
+
+ CSS = """
+ .spinner {
+     animation: spin 1s linear infinite;
+     display: inline-block;
+     margin-right: 8px;
+ }
+ @keyframes spin {
+     from { transform: rotate(0deg); }
+     to { transform: rotate(360deg); }
+ }
+ .thinking-summary {
+     cursor: pointer;
+     padding: 8px;
+     background: #f5f5f5;
+     border-radius: 4px;
+     margin: 4px 0;
+ }
+ .thought-content {
+     padding: 10px;
+     background: #f8f9fa;
+     border-radius: 4px;
+     margin: 5px 0;
+ }
+ .thinking-container {
+     border-left: 3px solid #e0e0e0;
+     padding-left: 10px;
+     margin: 8px 0;
+ }
+ details:not([open]) .thinking-container {
+     border-left-color: #4CAF50;
+ }
+ """
+
+ client = OpenAI(base_url="http://localhost:8080/v1", api_key="no-key-required")
+
+ def user(message, history):
+     return "", history + [[message, None]]
+ class ParserState:
+     __slots__ = ['answer', 'thought', 'in_think', 'start_time', 'last_pos', 'total_think_time']
+     def __init__(self):
+         self.answer = ""
+         self.thought = ""
+         self.in_think = False
+         self.start_time = 0
+         self.last_pos = 0
+         self.total_think_time = 0.0
+
+ def parse_response(text, state):
+     buffer = text[state.last_pos:]
+     state.last_pos = len(text)
+
+     while buffer:
+         if not state.in_think:
+             think_start = buffer.find('<think>')
+             if think_start != -1:
+                 state.answer += buffer[:think_start]
+                 state.in_think = True
+                 state.start_time = time.perf_counter()
+                 buffer = buffer[think_start + len('<think>'):]
+             else:
+                 state.answer += buffer
+                 break
+         else:
+             think_end = buffer.find('</think>')
+             if think_end != -1:
+                 state.thought += buffer[:think_end]
+                 # Accumulate elapsed thinking time so the final
+                 # "Thought for N seconds" label is not reset to zero.
+                 state.total_think_time += time.perf_counter() - state.start_time
+                 state.in_think = False
+                 buffer = buffer[think_end + len('</think>'):]
+             else:
+                 state.thought += buffer
+                 break
+
+     elapsed = (time.perf_counter() - state.start_time) if state.in_think else state.total_think_time
+     return state, elapsed
+
+ def format_response(state, elapsed):
+     answer_part = state.answer.replace('<think>', '').replace('</think>', '')
+     collapsible = []
+
+     if state.thought or state.in_think:
+         status = (f"🌀 Thinking for {elapsed:.0f} seconds"
+                   if state.in_think else f"✅ Thought for {elapsed:.0f} seconds")
+         collapsible.append(
+             f"<details open><summary>{status}</summary>\n\n<div class='thinking-container'>\n{state.thought}\n</div>\n</details>"
+         )
+
+     return collapsible, answer_part
+
+ def generate_response(history, temperature, top_p, max_tokens, active_gen):
+     messages = [{"role": "user", "content": history[-1][0]}]
+     full_response = ""
+     state = ParserState()
+
+     try:
+         stream = client.chat.completions.create(
+             model="",  # llama-server serves a single model; the name is ignored
+             messages=messages,
+             temperature=temperature,
+             top_p=top_p,
+             max_tokens=max_tokens,
+             stream=True
+         )
+
+         for chunk in stream:
+             if not active_gen[0]:
+                 break
+
+             if chunk.choices[0].delta.content:
+                 full_response += chunk.choices[0].delta.content
+                 state, elapsed = parse_response(full_response, state)
+
+                 collapsible, answer_part = format_response(state, elapsed)
+                 history[-1][1] = "\n\n".join(collapsible + [answer_part])  # Markdown-safe
+                 yield history
+
+         # Final update
+         state, elapsed = parse_response(full_response, state)
+         collapsible, answer_part = format_response(state, elapsed)
+         history[-1][1] = "\n\n".join(collapsible + [answer_part])  # Markdown-safe
+         yield history
+
+     except Exception as e:
+         history[-1][1] = f"Error: {str(e)}"
+         yield history
+     finally:
+         active_gen[0] = False
+
+ with gr.Blocks(css=CSS) as demo:
+     gr.Markdown(DESCRIPTION)
+     active_gen = gr.State([False])
+
+     chatbot = gr.Chatbot(
+         elem_id="chatbot",
+         height=500,
+         show_label=False,
+         render_markdown=True
+     )
+
+     with gr.Row():
+         msg = gr.Textbox(
+             label="Message",
+             placeholder="Type your message...",
+             container=False,
+             scale=4
+         )
+         submit_btn = gr.Button("Send", variant='primary', scale=1)
+
+     with gr.Column(scale=2):
+         with gr.Row():
+             clear_btn = gr.Button("Clear", variant='secondary')
+             stop_btn = gr.Button("Stop", variant='stop')
+
+     with gr.Accordion("Parameters", open=False):
+         temperature = gr.Slider(minimum=0.1, maximum=1.5, value=0.6, label="Temperature")
+         top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, label="Top-p")
+         max_tokens = gr.Slider(minimum=2048, maximum=16384, value=4096, step=64, label="Max Tokens")
+
+     gr.Examples(
+         examples=[
+             ["How many r's are in the word strawberry?"],
+             ["Write 10 funny sentences that end in a fruit!"],
+             ["Explain how RL for LLMs can be done!"]
+         ],
+         inputs=msg,
+         label="Example Prompts"
+     )
+
+     submit_event = submit_btn.click(
+         user, [msg, chatbot], [msg, chatbot], queue=False
+     ).then(
+         lambda: [True], outputs=active_gen
+     ).then(
+         generate_response, [chatbot, temperature, top_p, max_tokens, active_gen], chatbot
+     )
+
+     submit_event2 = msg.submit(
+         user, [msg, chatbot], [msg, chatbot], queue=False
+     ).then(
+         lambda: [True], outputs=active_gen
+     ).then(
+         generate_response, [chatbot, temperature, top_p, max_tokens, active_gen], chatbot
+     )
+
+     # Stop clears the flag and cancels both pending chains
+     # (button click and textbox submit), not just the button's.
+     stop_btn.click(
+         lambda: [False], None, active_gen, cancels=[submit_event, submit_event2]
+     )
+
+     clear_btn.click(lambda: None, None, chatbot, queue=False)
+
+ if __name__ == "__main__":
+     demo.launch(server_name="0.0.0.0", server_port=7860)
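The <think>/</think> parser above is incremental: each call consumes only the tail of the accumulated text past state.last_pos, so it can be driven chunk by chunk. A minimal sketch with simulated stream chunks, assuming app.py is on the import path as app (importing it constructs the Blocks UI but does not launch the server):

# Drive the incremental parser with simulated stream chunks.
from app import ParserState, parse_response

state = ParserState()
full = ""
for chunk in ["<think>Count the r's", " one by one.</think>", "There are 3 r's."]:
    full += chunk
    state, elapsed = parse_response(full, state)

print(state.thought)  # Count the r's one by one.
print(state.answer)   # There are 3 r's.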
start.sh ADDED
@@ -0,0 +1,17 @@
+ #!/bin/bash
+
+ # Start llama-server in the background
+ cd /llama.cpp/build
+ ./bin/llama-server --host 0.0.0.0 --port 8080 --model /models/model.q2_k_l.gguf --ctx-size 16384 &
+
+ # Wait until the server answers with HTTP 200, not merely accepts
+ # connections (-f makes curl fail on 5xx while the model is loading)
+ echo "Waiting for server to start..."
+ until curl -sf "http://localhost:8080/v1/models" >/dev/null; do
+     sleep 1
+ done
+
+ echo "Server is ready. Starting Gradio app."
+
+ # Start Gradio UI
+ cd /
+ python3 app.py
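start.sh gates the UI on the model server: the curl loop only exits once /v1/models responds, so app.py never starts against a dead endpoint. Should that gate ever need to live in Python instead (say, when launching both processes from one script), a stdlib sketch of the same polling loop; the URL and timeout here are assumptions:

# Poll the llama-server models endpoint until it returns HTTP 200.
import time
import urllib.request
import urllib.error

def wait_for_server(url="http://localhost:8080/v1/models", timeout=600.0):
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                if resp.status == 200:
                    return True
        except (urllib.error.URLError, OSError):
            pass  # not listening yet, or 5xx while the model loads
        time.sleep(1)
    return False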