sitammeur committed · verified
Commit 5b9a1a8 · 1 Parent(s): 254adc2

Create app.py

Files changed (1)
  1. app.py +198 -0
app.py ADDED
@@ -0,0 +1,198 @@
# Importing required libraries
import warnings
warnings.filterwarnings("ignore")

import json
import subprocess
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent
from llama_cpp_agent import MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download


# Download gguf model files
llm = None
llm_model = None

hf_hub_download(
    repo_id="bartowski/Qwen2.5-Coder-1.5B-Instruct-GGUF",
    filename="Qwen2.5-Coder-1.5B-Instruct-Q6_K.gguf",
    local_dir="./models",
)
hf_hub_download(
    repo_id="bartowski/Qwen2.5-Coder-3B-Instruct-GGUF",
    filename="Qwen2.5-Coder-3B-Instruct-Q5_K_S.gguf",
    local_dir="./models",
)

# Set the title and description
title = "Qwen-Coder Llama.cpp"
description = """Qwen2.5-Coder is the code-specialized series of the Qwen2.5 large language models, built for code generation, code reasoning, and code fixing. This demo runs the 1.5B and 3B Instruct GGUF variants locally via Llama.cpp."""


def respond(
    message,
    history: list[tuple[str, str]],
    model,
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    """
    Respond to a message using the selected Qwen2.5-Coder model via Llama.cpp.

    Args:
        - message (str): The message to respond to.
        - history (list[tuple[str, str]]): The chat history.
        - model (str): The GGUF model file to use.
        - system_message (str): The system message to use.
        - max_tokens (int): The maximum number of tokens to generate.
        - temperature (float): The temperature of the model.
        - top_p (float): The top-p of the model.
        - top_k (int): The top-k of the model.
        - repeat_penalty (float): The repetition penalty of the model.

    Yields:
        str: The cumulative response text generated so far.
    """
    # Load the global variables
    global llm
    global llm_model

    # Load the model
    if llm is None or llm_model != model:
        llm = Llama(
            model_path=f"models/{model}",
            flash_attn=False,
            n_gpu_layers=0,
            n_batch=32,
            n_ctx=8192,
        )
        llm_model = model
    provider = LlamaCppPythonProvider(llm)

    # Create the agent
    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=MessagesFormatterType.CHATML,
        debug_output=True,
    )

    # Set the settings like temperature, top-k, top-p, max tokens, etc.
    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True

    messages = BasicChatHistory()

    # Add the chat history
    for msg in history:
        user = {"role": Roles.user, "content": msg[0]}
        assistant = {"role": Roles.assistant, "content": msg[1]}
        messages.add_message(user)
        messages.add_message(assistant)

    # Get the response stream
    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False,
    )

    # Generate the response
    outputs = ""
    for output in stream:
        outputs += output
        yield outputs


# Create a chat interface
demo = gr.ChatInterface(
    respond,
    additional_inputs_accordion=gr.Accordion(
        label="⚙️ Parameters", open=False, render=False
    ),
    additional_inputs=[
        gr.Dropdown(
            choices=[
                "Qwen2.5-Coder-1.5B-Instruct-Q6_K.gguf",
                "Qwen2.5-Coder-3B-Instruct-Q5_K_S.gguf",
            ],
            value="Qwen2.5-Coder-1.5B-Instruct-Q6_K.gguf",
            label="Model",
            info="Select the AI model to use for chat",
        ),
        gr.Textbox(
            value="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
            label="System Prompt",
            info="Define the AI assistant's personality and behavior",
            lines=2,
        ),
        gr.Slider(
            minimum=512,
            maximum=4096,
            value=2048,
            step=512,
            label="Max Tokens",
            info="Maximum length of response (higher = longer replies)",
        ),
        gr.Slider(
            minimum=0.1,
            maximum=2.0,
            value=0.7,
            step=0.1,
            label="Temperature",
            info="Creativity level (higher = more creative, lower = more focused)",
        ),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p",
            info="Nucleus sampling threshold",
        ),
        gr.Slider(
            minimum=1,
            maximum=100,
            value=40,
            step=1,
            label="Top-k",
            info="Limit vocabulary choices to top K tokens",
        ),
        gr.Slider(
            minimum=1.0,
            maximum=2.0,
            value=1.1,
            step=0.1,
            label="Repetition Penalty",
            info="Penalize repeated words (higher = less repetition)",
        ),
    ],
    theme="Ocean",
    submit_btn="Send",
    stop_btn="Stop",
    title=title,
    description=description,
    chatbot=gr.Chatbot(scale=1, show_copy_button=True),
    flagging_mode="never",
)


# Launch the chat interface
if __name__ == "__main__":
    demo.launch(debug=False)
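
For reviewers who want to exercise the streaming generator outside the Gradio UI, here is a minimal, hypothetical smoke test (a separate script such as test_respond.py, not part of this commit). The prompt and settings are illustrative; it assumes the dependencies (llama-cpp-python, llama-cpp-agent, gradio, huggingface_hub) are installed and that importing app runs the hf_hub_download calls, placing the GGUF files under ./models:

# Hypothetical smoke test in a separate script (e.g. test_respond.py); not part of app.py.
from app import respond

final = ""
for partial in respond(
    "Write a Python function that reverses a string.",  # message
    [],                                                  # empty chat history
    "Qwen2.5-Coder-1.5B-Instruct-Q6_K.gguf",             # model filename under ./models
    "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
    512,   # max_tokens
    0.7,   # temperature
    0.95,  # top_p
    40,    # top_k
    1.1,   # repeat_penalty
):
    final = partial  # respond yields the cumulative text so far

print(final)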