adowu committed
Commit b96f7b5 · 1 Parent(s): 6533564

Update app.py

Files changed (1)
  1. app.py +1 -52
app.py CHANGED
@@ -9,10 +9,6 @@ import pandas as pd
 from huggingface_hub import upload_file
 from text_generation import Client
 
-from dialogues import DialogueTemplate
-from share_btn import (community_icon_html, loading_icon_html, share_btn_css,
-                       share_js)
-
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 API_TOKEN = os.environ.get("API_TOKEN", None)
 DIALOGUES_DATASET = "HuggingFaceH4/starchat_playground_dialogues"
@@ -188,19 +184,6 @@ def generate(
 
     return chat, history, user_message, ""
 
-
-examples = [
-    "How can I write a Python function to generate the nth Fibonacci number?",
-    "How do I get the current date using shell commands? Explain how it works.",
-    "What's the meaning of life?",
-    "Write a function in Javascript to reverse words in a given string.",
-    "Give the following data {'Name':['Tom', 'Brad', 'Kyle', 'Jerry'], 'Age':[20, 21, 19, 18], 'Height' : [6.1, 5.9, 6.0, 6.1]}. Can you plot one graph with two subplots as columns. The first is a bar graph showing the height of each person. The second is a bargraph showing the age of each person? Draw the graph in seaborn talk mode.",
-    "Create a regex to extract dates from logs",
-    "How to decode JSON into a typescript object",
-    "Write a list into a jsonlines file and save locally",
-]
-
-
 def clear_chat():
     return [], []
 
@@ -259,41 +242,8 @@ def retry_last_answer(
     )
 
 
-title = """<h1 align="center">⭐ StarChat Playground 💬</h1>"""
-custom_css = """
-#banner-image {
-    display: block;
-    margin-left: auto;
-    margin-right: auto;
-}
-
-#chat-message {
-    font-size: 14px;
-    min-height: 300px;
-}
-"""
-
 with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
     gr.HTML(title)
-
-    with gr.Row():
-        with gr.Column():
-            gr.Image("thumbnail.png", elem_id="banner-image", show_label=False)
-        with gr.Column():
-            gr.Markdown(
-                """
-            💻 This demo showcases a series of **[StarChat](https://huggingface.co/models?search=huggingfaceh4/starchat)** language models, which are fine-tuned versions of the StarCoder family to act as helpful coding assistants. The base model has 16B parameters and was pretrained on one trillion tokens sourced from 80+ programming languages, GitHub issues, Git commits, and Jupyter notebooks (all permissively licensed).
-
-            📝 For more details, check out our [blog post](https://huggingface.co/blog/starchat-alpha).
-
-            ⚠️ **Intended Use**: this app and its [supporting models](https://huggingface.co/models?search=huggingfaceh4/starchat) are provided as educational tools to explain large language model fine-tuning; not to serve as replacement for human expertise.
-
-            ⚠️ **Known Failure Modes**: the alpha and beta version of **StarChat** have not been aligned to human preferences with techniques like RLHF, so they can produce problematic outputs (especially when prompted to do so). Since the base model was pretrained on a large corpus of code, it may produce code snippets that are syntactically valid but semantically incorrect. For example, it may produce code that does not compile or that produces incorrect results. It may also produce code that is vulnerable to security exploits. We have observed the model also has a tendency to produce false URLs which should be carefully inspected before clicking. For more details on the model's limitations in terms of factuality and biases, see the [model card](https://huggingface.co/HuggingFaceH4/starchat-alpha#bias-risks-and-limitations).
-
-            ⚠️ **Data Collection**: by default, we are collecting the prompts entered in this app to further improve and evaluate the models. Do **NOT** share any personal or sensitive information while using the app! You can opt out of this data collection by removing the checkbox below.
-            """
-            )
-
     with gr.Row():
         do_save = gr.Checkbox(
             value=True,
@@ -451,6 +401,5 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
     delete_turn_button.click(delete_last_turn, [chatbot, history], [chatbot, history])
    clear_chat_button.click(clear_chat, outputs=[chatbot, history])
    selected_model.change(clear_chat, outputs=[chatbot, history])
-    # share_button.click(None, [], [], _js=share_js)
-
+
 demo.queue(concurrency_count=16).launch(debug=True)
 