jonaschua committed · verified
Commit 02c7617 · 1 Parent(s): 74cc87b

Update app.py

Files changed (1):
  1. app.py +20 -24
app.py CHANGED
@@ -17,16 +17,24 @@ For more information on `huggingface_hub` Inference API support, please check th
 client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct", token=os.getenv('deepseekv2'))
 
 
-@spaces.GPU(duration=13)
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    model,
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
+def choose_model(model_name):
+    if model_name == "Qwen1.5":
+        client = InferenceClient("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", token=os.getenv('deepseekv2'))
+
+    elif model_name == "Llama3-8b-Instruct":
+        client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=os.getenv('deepseekv2'))
+
+    elif model_name == "Llama3.1-8b-Instruct":
+        client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct", token=os.getenv('deepseekv2'))
+
+    return
+
+
+@spaces.GPU(duration=1)
+def respond(message, history: list[tuple[str, str]], model, system_message, max_tokens, temperature, top_p):
+
+    print(model)
+    choose_model(model)
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
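Note: as committed, choose_model rebinds a function-local client and returns None, so the module-level client created on line 17 keeps serving every request regardless of which model name is passed in. A minimal sketch of one way to make the selection take effect; the MODELS mapping and the fallback are assumptions for illustration, not part of the commit:

import os
from huggingface_hub import InferenceClient

# Hypothetical mapping from the names used above to model repo ids.
MODELS = {
    "Qwen1.5": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    "Llama3-8b-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
    "Llama3.1-8b-Instruct": "meta-llama/Llama-3.1-8B-Instruct",
}

def choose_model(model_name: str) -> InferenceClient:
    # Fall back to Llama 3.1 for unknown names instead of silently doing nothing.
    repo_id = MODELS.get(model_name, "meta-llama/Llama-3.1-8B-Instruct")
    return InferenceClient(repo_id, token=os.getenv('deepseekv2'))

respond() would then call client = choose_model(model) instead of invoking it for its (absent) side effect.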
@@ -39,13 +47,7 @@ def respond(
 
     response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
+    for message in client.chat_completion(messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p):
         token = message.choices[0].delta.content
 
         response += token
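For reference, a standalone sketch of the streaming pattern the collapsed one-liner uses, assuming huggingface_hub is installed; the prompt here is a placeholder. One caveat in the committed loop: delta.content can be None on some chunks, so guarding before concatenating is safer:

import os
from huggingface_hub import InferenceClient

client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct", token=os.getenv('deepseekv2'))
messages = [{"role": "user", "content": "Say hello in one sentence."}]

response = ""
# stream=True yields incremental chunks instead of one final completion
for chunk in client.chat_completion(messages, max_tokens=64, stream=True, temperature=0.7, top_p=0.95):
    token = chunk.choices[0].delta.content
    if token:  # empty/final chunks may carry no content
        response += token
print(response)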
@@ -66,13 +68,7 @@ demo = gr.ChatInterface(
         gr.Textbox(value="You are a friendly and helpful Chatbot, be concise and straight to the point, avoid excessive reasoning.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
     ],
 )
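The sliders above sit in gr.ChatInterface's additional_inputs, which Gradio passes to respond() positionally after (message, history), so their order must mirror the function signature. A sketch of the full wiring implied by that signature; the gr.Dropdown is an assumption, since this hunk only shows the tail of the list:

import gradio as gr

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        # assumed: something must supply the `model` argument ahead of the system message
        gr.Dropdown(["Qwen1.5", "Llama3-8b-Instruct", "Llama3.1-8b-Instruct"], value="Llama3.1-8b-Instruct", label="Model"),
        gr.Textbox(value="You are a friendly and helpful Chatbot, be concise and straight to the point, avoid excessive reasoning.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)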
 
 