mishig HF staff committed on
Commit
3861337
·
1 Parent(s): d951613

both models working

Browse files
Files changed (1) hide show
  1. app.py +10 -11
app.py CHANGED
@@ -11,7 +11,8 @@ from share_btn import community_icon_html, loading_icon_html, share_js, share_bt
11
 
12
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
13
 
14
- API_URL = "https://api-inference.huggingface.co/models/Phind/Phind-CodeLlama-34B-v2"
 
15
 
16
  FIM_PREFIX = "<PRE> "
17
  FIM_MIDDLE = " <MID>"
@@ -35,15 +36,13 @@ theme = gr.themes.Monochrome(
35
  ],
36
  )
37
 
38
- client = Client(
39
- API_URL,
40
- headers={"Authorization": f"Bearer {HF_TOKEN}"},
41
- )
42
-
43
-
44
  def generate(
45
- prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
46
  ):
 
 
 
 
47
 
48
  temperature = float(temperature)
49
  if temperature < 1e-2:
@@ -94,7 +93,7 @@ def generate(
94
  return output
95
 
96
  def generate_both(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
97
- generator_1, generator_2 = generate(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0), generate(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0)
98
  output_1, output_2 = "", ""
99
  output_1_end, output_2_end = False, False
100
 
@@ -163,8 +162,8 @@ with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
163
  elem_id="q-input",
164
  )
165
  submit = gr.Button("Generate", variant="primary")
166
- output_1 = gr.Code(elem_id="q-output", lines=30, label="Output")
167
- output_2 = gr.Code(elem_id="q-output", lines=30, label="Output")
168
  with gr.Row():
169
  with gr.Column():
170
  with gr.Accordion("Advanced settings", open=False):
 
11
 
12
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
13
 
14
+ API_URL = "https://api-inference.huggingface.co/models/"
15
+ model_id_1, model_id_2 = "Phind/Phind-CodeLlama-34B-v2", "WizardLM/WizardCoder-Python-34B-V1.0"
16
 
17
  FIM_PREFIX = "<PRE> "
18
  FIM_MIDDLE = " <MID>"
 
36
  ],
37
  )
38
 
 
 
 
 
 
 
39
  def generate(
40
+ model_id, prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
41
  ):
42
+ client = Client(
43
+ f"{API_URL}{model_id}",
44
+ headers={"Authorization": f"Bearer {HF_TOKEN}"},
45
+ )
46
 
47
  temperature = float(temperature)
48
  if temperature < 1e-2:
 
93
  return output
94
 
95
  def generate_both(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
96
+ generator_1, generator_2 = generate(model_id_1, prompt, temperature, max_new_tokens, top_p, repetition_penalty), generate(model_id_2, prompt, temperature, max_new_tokens, top_p, repetition_penalty)
97
  output_1, output_2 = "", ""
98
  output_1_end, output_2_end = False, False
99
 
 
162
  elem_id="q-input",
163
  )
164
  submit = gr.Button("Generate", variant="primary")
165
+ output_1 = gr.Code(elem_id="q-output", lines=30, label=f"{model_id_1} Output")
166
+ output_2 = gr.Code(elem_id="q-output", lines=30, label=f"{model_id_2} Output")
167
  with gr.Row():
168
  with gr.Column():
169
  with gr.Accordion("Advanced settings", open=False):