solitudeLin committed on
Commit
fad64ab
·
verified ·
1 Parent(s): 8e92e61

Update server.js

Browse files
Files changed (1) hide show
  1. server.js +89 -75
server.js CHANGED
@@ -10,7 +10,6 @@ import {
10
  spaceInfo,
11
  fileExists,
12
  } from "@huggingface/hub";
13
- import { InferenceClient } from "@huggingface/inference";
14
  import bodyParser from "body-parser";
15
 
16
  import checkUser from "./middlewares/checkUser.js";
@@ -30,8 +29,12 @@ const __dirname = path.dirname(__filename);
30
  const PORT = process.env.APP_PORT || 3000;
31
  const REDIRECT_URI =
32
  process.env.REDIRECT_URI || `http://localhost:${PORT}/auth/login`;
33
- const MODEL_ID = "deepseek-ai/DeepSeek-V3-0324";
34
- const MAX_REQUESTS_PER_IP = 2;
 
 
 
 
35
 
36
  app.use(cookieParser());
37
  app.use(bodyParser.json());
@@ -204,7 +207,6 @@ app.post("/api/ask-ai", async (req, res) => {
204
  }
205
 
206
  const { hf_token } = req.cookies;
207
- let token = hf_token;
208
  const ip =
209
  req.headers["x-forwarded-for"]?.split(",")[0].trim() ||
210
  req.headers["x-real-ip"] ||
@@ -221,8 +223,6 @@ app.post("/api/ask-ai", async (req, res) => {
221
  message: "Log In to continue using the service",
222
  });
223
  }
224
-
225
- token = process.env.DEFAULT_HF_TOKEN;
226
  }
227
 
228
  // Set up response headers for streaming
@@ -230,20 +230,11 @@ app.post("/api/ask-ai", async (req, res) => {
230
  res.setHeader("Cache-Control", "no-cache");
231
  res.setHeader("Connection", "keep-alive");
232
 
233
- const client = new InferenceClient(token);
234
- let completeResponse = "";
235
-
236
  let TOKENS_USED = prompt?.length;
237
  if (previousPrompt) TOKENS_USED += previousPrompt.length;
238
  if (html) TOKENS_USED += html.length;
239
 
240
  const DEFAULT_PROVIDER = PROVIDERS.novita;
241
- // const selectedProvider =
242
- // provider === "auto"
243
- // ? TOKENS_USED < PROVIDERS.sambanova.max_tokens
244
- // ? PROVIDERS.sambanova
245
- // : DEFAULT_PROVIDER
246
- // : PROVIDERS[provider] ?? DEFAULT_PROVIDER;
247
  const selectedProvider =
248
  provider === "auto"
249
  ? DEFAULT_PROVIDER
@@ -258,73 +249,96 @@ app.post("/api/ask-ai", async (req, res) => {
258
  }
259
 
260
  try {
261
- const chatCompletion = client.chatCompletionStream({
262
- model: MODEL_ID,
263
- provider: selectedProvider.id,
264
- messages: [
265
- {
266
- role: "system",
267
- content: `ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICON make sure to import the library first. Try to create the best UI possible by using only HTML, CSS and JAVASCRIPT. Use as much as you can TailwindCSS for the CSS, if you can't do something with TailwindCSS, then use custom CSS (make sure to import <script src="https://cdn.tailwindcss.com"></script> in the head). Also, try to ellaborate as much as you can, to create something unique. ALWAYS GIVE THE RESPONSE INTO A SINGLE HTML FILE`,
268
- },
269
- ...(previousPrompt
270
- ? [
271
- {
272
- role: "user",
273
- content: previousPrompt,
274
- },
275
- ]
276
- : []),
277
- ...(html
278
- ? [
279
- {
280
- role: "assistant",
281
- content: `The current code is: ${html}.`,
282
- },
283
- ]
284
- : []),
285
- {
286
- role: "user",
287
- content: prompt,
288
- },
289
- ],
290
- ...(selectedProvider.id !== "sambanova"
291
- ? {
292
- max_tokens: selectedProvider.max_tokens,
293
- }
294
- : {}),
 
 
 
 
 
 
 
 
295
  });
296
 
 
 
 
 
 
 
 
 
 
297
  while (true) {
298
- const { done, value } = await chatCompletion.next();
299
- if (done) {
300
- break;
301
- }
302
- const chunk = value.choices[0]?.delta?.content;
303
- if (chunk) {
304
- if (provider !== "sambanova") {
305
- res.write(chunk);
306
- completeResponse += chunk;
307
-
308
- if (completeResponse.includes("</html>")) {
309
- break;
310
- }
311
- } else {
312
- let newChunk = chunk;
313
- if (chunk.includes("</html>")) {
314
- // Replace everything after the last </html> tag with an empty string
315
- newChunk = newChunk.replace(/<\/html>[\s\S]*/, "</html>");
316
- }
317
- completeResponse += newChunk;
318
- res.write(newChunk);
319
- if (newChunk.includes("</html>")) {
320
- break;
 
 
 
 
321
  }
322
  }
323
  }
324
  }
325
- // End the response stream
326
  res.end();
327
  } catch (error) {
 
 
328
  if (error.message.includes("exceeded your monthly included credits")) {
329
  return res.status(402).send({
330
  ok: false,
@@ -332,6 +346,7 @@ app.post("/api/ask-ai", async (req, res) => {
332
  message: error.message,
333
  });
334
  }
 
335
  if (!res.headersSent) {
336
  res.status(500).send({
337
  ok: false,
@@ -339,7 +354,6 @@ app.post("/api/ask-ai", async (req, res) => {
339
  error.message || "An error occurred while processing your request.",
340
  });
341
  } else {
342
- // Otherwise end the stream
343
  res.end();
344
  }
345
  }
@@ -387,4 +401,4 @@ app.get("*", (_req, res) => {
387
 
388
  app.listen(PORT, () => {
389
  console.log(`Server is running on port ${PORT}`);
390
- });
 
10
  spaceInfo,
11
  fileExists,
12
  } from "@huggingface/hub";
 
13
  import bodyParser from "body-parser";
14
 
15
  import checkUser from "./middlewares/checkUser.js";
 
29
  const PORT = process.env.APP_PORT || 3000;
30
  const REDIRECT_URI =
31
  process.env.REDIRECT_URI || `http://localhost:${PORT}/auth/login`;
32
+ const MAX_REQUESTS_PER_IP = 5;
33
+
34
+ // Custom API configuration
35
+ const CUSTOM_API_BASE_URL = "https://api.sakis.top/v1";
36
+ const CUSTOM_API_KEY = "sk-xpoSC7WyTfI10jk_2Xh6J1gRizxEqjRqCL6k81ZdiB3mRnJm6y70EA7gTD8";
37
+ const MODEL_ID = "gemini-2.5-pro-exp-03-25";
38
 
39
  app.use(cookieParser());
40
  app.use(bodyParser.json());
 
207
  }
208
 
209
  const { hf_token } = req.cookies;
 
210
  const ip =
211
  req.headers["x-forwarded-for"]?.split(",")[0].trim() ||
212
  req.headers["x-real-ip"] ||
 
223
  message: "Log In to continue using the service",
224
  });
225
  }
 
 
226
  }
227
 
228
  // Set up response headers for streaming
 
230
  res.setHeader("Cache-Control", "no-cache");
231
  res.setHeader("Connection", "keep-alive");
232
 
 
 
 
233
  let TOKENS_USED = prompt?.length;
234
  if (previousPrompt) TOKENS_USED += previousPrompt.length;
235
  if (html) TOKENS_USED += html.length;
236
 
237
  const DEFAULT_PROVIDER = PROVIDERS.novita;
 
 
 
 
 
 
238
  const selectedProvider =
239
  provider === "auto"
240
  ? DEFAULT_PROVIDER
 
249
  }
250
 
251
  try {
252
+ let completeResponse = "";
253
+
254
+ // Prepare the messages array for the API request
255
+ const messages = [
256
+ {
257
+ role: "system",
258
+ content: "ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICON make sure to import the library first. Try to create the best UI possible by using only HTML, CSS and JAVASCRIPT. Use as much as you can TailwindCSS for the CSS, if you can't do something with TailwindCSS, then use custom CSS (make sure to import <script src=\"https://cdn.tailwindcss.com\"></script> in the head). Also, try to ellaborate as much as you can, to create something unique. ALWAYS GIVE THE RESPONSE INTO A SINGLE HTML FILE"
259
+ }
260
+ ];
261
+
262
+ if (previousPrompt) {
263
+ messages.push({
264
+ role: "user",
265
+ content: previousPrompt
266
+ });
267
+ }
268
+
269
+ if (html) {
270
+ messages.push({
271
+ role: "assistant",
272
+ content: `The current code is: ${html}.`
273
+ });
274
+ }
275
+
276
+ messages.push({
277
+ role: "user",
278
+ content: prompt
279
+ });
280
+
281
+ // Custom API call to your specified endpoint
282
+ const response = await fetch(`${CUSTOM_API_BASE_URL}/chat/completions`, {
283
+ method: 'POST',
284
+ headers: {
285
+ 'Content-Type': 'application/json',
286
+ 'Authorization': `Bearer ${CUSTOM_API_KEY}`
287
+ },
288
+ body: JSON.stringify({
289
+ model: "gpt-4-0125-preview", // You can adjust this model as needed
290
+ messages: messages,
291
+ stream: true,
292
+ max_tokens: selectedProvider.max_tokens || 4096
293
+ })
294
  });
295
 
296
+ if (!response.ok) {
297
+ const errorData = await response.json();
298
+ throw new Error(errorData.error?.message || 'Failed to get a response from the API');
299
+ }
300
+
301
+ // Handle streaming response
302
+ const reader = response.body.getReader();
303
+ const decoder = new TextDecoder();
304
+
305
  while (true) {
306
+ const { done, value } = await reader.read();
307
+ if (done) break;
308
+
309
+ const chunk = decoder.decode(value, { stream: true });
310
+ const lines = chunk.split('\n').filter(line => line.trim() !== '');
311
+
312
+ for (const line of lines) {
313
+ if (line.startsWith('data: ')) {
314
+ const data = line.substring(6);
315
+ if (data === '[DONE]') continue;
316
+
317
+ try {
318
+ const parsed = JSON.parse(data);
319
+ const content = parsed.choices[0]?.delta?.content || '';
320
+
321
+ if (content) {
322
+ res.write(content);
323
+ completeResponse += content;
324
+
325
+ if (completeResponse.includes("</html>")) {
326
+ // End if we see closing HTML tag
327
+ reader.cancel();
328
+ break;
329
+ }
330
+ }
331
+ } catch (e) {
332
+ console.error('Error parsing stream chunk:', e);
333
  }
334
  }
335
  }
336
  }
337
+
338
  res.end();
339
  } catch (error) {
340
+ console.error('API error:', error);
341
+
342
  if (error.message.includes("exceeded your monthly included credits")) {
343
  return res.status(402).send({
344
  ok: false,
 
346
  message: error.message,
347
  });
348
  }
349
+
350
  if (!res.headersSent) {
351
  res.status(500).send({
352
  ok: false,
 
354
  error.message || "An error occurred while processing your request.",
355
  });
356
  } else {
 
357
  res.end();
358
  }
359
  }
 
401
 
402
  app.listen(PORT, () => {
403
  console.log(`Server is running on port ${PORT}`);
404
+ });