abidlabs HF staff committed on
Commit
0db4b92
·
verified ·
1 Parent(s): 5e105d3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -4
app.py CHANGED
@@ -95,7 +95,6 @@ def inference(prompt, hf_token, model, model_name):
95
  messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}]
96
  if hf_token is None or not hf_token.strip():
97
  hf_token = os.getenv("HF_TOKEN")
98
- print("using hf token env")
99
  client = InferenceClient(model=model, token=hf_token)
100
  tokens = f"**`{model_name}`**\n\n"
101
  for completion in client.chat_completion(messages, max_tokens=200, stream=True):
@@ -214,6 +213,4 @@ with gr.Blocks(css=css, theme="NoCrypt/miku", js=js) as demo:
214
  )
215
 
216
 
217
- demo.launch(show_api=False)
218
-
219
-
 
95
  messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}]
96
  if hf_token is None or not hf_token.strip():
97
  hf_token = os.getenv("HF_TOKEN")
 
98
  client = InferenceClient(model=model, token=hf_token)
99
  tokens = f"**`{model_name}`**\n\n"
100
  for completion in client.chat_completion(messages, max_tokens=200, stream=True):
 
213
  )
214
 
215
 
216
+ demo.launch(show_api=False)