abidlabs (HF staff) committed
Commit 5e105d3 · verified · 1 Parent(s): 6aefa50

Update app.py

Files changed (1)
  1. app.py +1 -0
app.py CHANGED
@@ -95,6 +95,7 @@ def inference(prompt, hf_token, model, model_name):
     messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}]
     if hf_token is None or not hf_token.strip():
         hf_token = os.getenv("HF_TOKEN")
+        print("using hf token env")
     client = InferenceClient(model=model, token=hf_token)
     tokens = f"**`{model_name}`**\n\n"
     for completion in client.chat_completion(messages, max_tokens=200, stream=True):
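For context, here is a minimal sketch of how the patched inference() function plausibly fits together after this change. The system_prompt value and the Gradio-style streaming generator (yielding the accumulated markdown string) are assumptions inferred from the diff context, not confirmed by the full app.py.

# Sketch only: system_prompt and the yield-based streaming pattern are assumed,
# inferred from the hunk context rather than taken from the full app.py.
import os

from huggingface_hub import InferenceClient

system_prompt = "You are a helpful assistant."  # assumed; the real prompt is defined elsewhere in app.py


def inference(prompt, hf_token, model, model_name):
    messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}]
    if hf_token is None or not hf_token.strip():
        # Fall back to the Space's HF_TOKEN secret when the user supplies no token.
        hf_token = os.getenv("HF_TOKEN")
        print("using hf token env")
    client = InferenceClient(model=model, token=hf_token)
    tokens = f"**`{model_name}`**\n\n"
    for completion in client.chat_completion(messages, max_tokens=200, stream=True):
        # Each streamed chunk carries an incremental delta; accumulate it and
        # yield the running string so a Gradio output can update live.
        delta = completion.choices[0].delta.content or ""
        tokens += delta
        yield tokens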