jonaschua committed
Commit 46e0200 · verified · Parent(s): 02c7617

Update app.py

Files changed (1):
  app.py +10 -9
app.py CHANGED
@@ -14,24 +14,25 @@ For more information on `huggingface_hub` Inference API support, please check th
 # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 # client = InferenceClient("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", token=os.getenv('deepseekv2'))
 # client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=os.getenv('deepseekv2'))
-client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct", token=os.getenv('deepseekv2'))
+# client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct", token=os.getenv('deepseekv2'))
 
 
 def choose_model(model_name):
     if model_name == "Qwen1.5":
-        client = InferenceClient("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", token=os.getenv('deepseekv2'))
+        model = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
 
     elif model_name == "Llama3-8b-Instruct":
-        client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=os.getenv('deepseekv2'))
+        model = "meta-llama/Meta-Llama-3-8B-Instruct"
 
-    elif model_name == "Llama3.1-8b-Instruct"
-        client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct", token=os.getenv('deepseekv2'))
-
+    elif model_name == "Llama3.1-8b-Instruct":
+        model = "meta-llama/Llama-3.1-8B-Instruct"
+
+    client = InferenceClient(f"{model}, token=os.getenv('deepseekv2')")
     return
 
 
 @spaces.GPU(duration=1)
-def respond(message, history: list[tuple[str, str]], model, system_message, max_tokens, temperature, top_p,):
+def respond(message, history: list[tuple[str, str]], model, system_message, max_tokens, temperature, top_p):
 
     print(model)
     choose_model(model)
@@ -68,8 +69,8 @@ demo = gr.ChatInterface(
         gr.Textbox(value="You are a friendly and helpful Chatbot, be concise and straight to the point, avoid excessive reasoning.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
-    ],
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
+    ]
 )
 
 
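The committed choose_model still carries two bugs worth flagging: the f-string InferenceClient(f"{model}, token=os.getenv('deepseekv2')") folds the literal text ", token=os.getenv('deepseekv2')" into the model id instead of passing the token as a keyword argument, and client is bound to a local name that is discarded at the bare return, so respond never sees a client at all. A minimal corrected sketch, assuming the same deepseekv2 secret and the model ids taken from the branches in this diff (the MODELS table is a hypothetical helper, not part of the app):

import os
from huggingface_hub import InferenceClient

# UI label -> model repo id, copied from the branches of choose_model in this commit.
MODELS = {
    "Qwen1.5": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    "Llama3-8b-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
    "Llama3.1-8b-Instruct": "meta-llama/Llama-3.1-8B-Instruct",
}

def choose_model(model_name):
    # Pass the repo id and the token as separate arguments (the commit's f-string
    # merges both into one string), and return the client so the caller can use it.
    model = MODELS.get(model_name, "meta-llama/Llama-3.1-8B-Instruct")
    return InferenceClient(model, token=os.getenv('deepseekv2'))

respond would then begin with client = choose_model(model) rather than calling choose_model(model) for a side effect that never escapes the function.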