xyizko commited on
Commit
593e98b
·
verified ·
1 Parent(s): 8490e63

Update app.py

Browse files

-- HF Chat fixes

Files changed (1) hide show
  1. app.py +14 -12
app.py CHANGED
@@ -11,14 +11,18 @@ def respond(message, history, token, model, system_message, max_tokens, temperat
11
 
12
  # Default model selection logic
13
  if not token:
14
- model = "HuggingFaceH4/zephyr-7b-beta" # Default public model that doesn't require token
 
15
  try:
16
  client = InferenceClient(model=model)
17
  except Exception as e:
18
  yield f"Error initializing client: {str(e)}"
19
  return
20
  else:
21
- model = model or "HuggingFaceH4/zephyr-7b-beta" # Default private model
 
 
 
22
  try:
23
  client = InferenceClient(model=model, token=token)
24
  except Exception as e:
@@ -53,26 +57,24 @@ def respond(message, history, token, model, system_message, max_tokens, temperat
53
  # Input components
54
  token_input = gr.Textbox(
55
  type="password",
56
- label="HF API Token (leave empty for public models)",
57
  placeholder="hf_XXXXXXXXXXXX"
58
  )
59
- model_input = gr.Dropdown(
60
- label="Model Name",
61
- choices=[
62
- "HuggingFaceH4/zephyr-7b-beta",
63
- ],
64
- value="HuggingFaceH4/zephyr-7b-beta"
65
  )
66
 
67
  # Chat interface
68
  demo = gr.ChatInterface(
69
  fn=respond,
70
- title="HF Model Chat Interface",
71
- description="Enter token for private models or use public models without token",
72
  additional_inputs=[
73
  token_input,
74
  model_input,
75
- gr.Textbox(value="You are helpful AI.", label="System Message"),
76
  gr.Slider(1, 2048, value=512, label="Max Tokens"),
77
  gr.Slider(0.1, 4.0, value=0.7, label="Temperature"),
78
  gr.Slider(0.1, 1.0, value=0.95, label="Top-p"),
 
11
 
12
  # Default model selection logic
13
  if not token:
14
+ # Use default public model when no token is provided
15
+ model = "HuggingFaceH4/zephyr-7b-beta"
16
  try:
17
  client = InferenceClient(model=model)
18
  except Exception as e:
19
  yield f"Error initializing client: {str(e)}"
20
  return
21
  else:
22
+ # Require model name when token is provided
23
+ if not model:
24
+ yield "Please provide a model name when using an HF token."
25
+ return
26
  try:
27
  client = InferenceClient(model=model, token=token)
28
  except Exception as e:
 
57
  # Input components
58
  token_input = gr.Textbox(
59
  type="password",
60
+ label="HF API Token (leave empty for public model)",
61
  placeholder="hf_XXXXXXXXXXXX"
62
  )
63
+ model_input = gr.Textbox(
64
+ label="Model Name (required if using token)",
65
+ placeholder="Enter model name when using token",
66
+ visible=True
 
 
67
  )
68
 
69
  # Chat interface
70
  demo = gr.ChatInterface(
71
  fn=respond,
72
+ title="XYIZKO HF Text Generation Model Tester",
73
+ description="Uses Zephyr-7b-beta by default. Add token + model name for private models",
74
  additional_inputs=[
75
  token_input,
76
  model_input,
77
+ gr.Textbox(value="You are a helpful AI chatbot who responds like a medieval knight who is extremely polite and noble.", label="System Message"),
78
  gr.Slider(1, 2048, value=512, label="Max Tokens"),
79
  gr.Slider(0.1, 4.0, value=0.7, label="Temperature"),
80
  gr.Slider(0.1, 1.0, value=0.95, label="Top-p"),