PrarthanaTS committed on
Commit
13c631e
·
1 Parent(s): ce5ad51

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -9
app.py CHANGED
@@ -9,15 +9,9 @@ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
9
  num_new_tokens = 200 # change to the number of new tokens you want to generate
10
 
11
  DESCRIPTION = """\
12
- # 🧑🏽‍💻Microsoft Phi2 Chatbot🤖
13
- This Space demonstrates model [Microsoft Phi2 2.7B](https://huggingface.co/microsoft/phi-2), a model with 2.78B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
14
- \n 🔎 For more details about the finetuning, take a look at the [GitHub](https://github.com/mkthoma/llm_finetuning) code.
15
- \n ⛔⛔ The model is hosted on a CPU and inference takes a long time. Please feel free to duplicate the space and use it on a GPU ⛔⛔
16
- """
17
 
18
- LICENSE = """
19
- As a derivate work of [Microsoft Phi2 2.7B](https://huggingface.co/microsoft/phi-2), this demo is governed by the original [license](https://huggingface.co/microsoft/phi-2/resolve/main/LICENSE).
20
- """
21
 
22
  def generate(question, context, max_new_tokens = 200, temperature = 0.6):
23
 
@@ -37,7 +31,7 @@ def generate(question, context, max_new_tokens = 200, temperature = 0.6):
37
  bbchatbot = gr.Chatbot(
38
  avatar_images=["logo/user_logo.png", "logo/bot_logo.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
39
 
40
- examples = [["What is a large language model?"], ["How to calm down a person?"], ["What is aritificial intelligence?"], ["How to write a good resume?"]]
41
 
42
  additional_inputs = additional_inputs=[gr.Slider(label="Max new tokens",minimum=100,maximum=500,step=10,value=num_new_tokens),
43
  gr.Slider(label="Temperature",minimum=0.1,maximum=4.0,step=0.1,value=0.6)]
 
9
  num_new_tokens = 200 # change to the number of new tokens you want to generate
10
 
11
  DESCRIPTION = """\
12
+ # Microsoft Phi2 Chatbot
13
+ \n The model is hosted on a CPU and inference takes a long time. Please feel free to duplicate the space and use it on a GPU"""
 
 
 
14
 
 
 
 
15
 
16
  def generate(question, context, max_new_tokens = 200, temperature = 0.6):
17
 
 
31
  bbchatbot = gr.Chatbot(
32
  avatar_images=["logo/user_logo.png", "logo/bot_logo.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
33
 
34
+ examples = [["What are transformers?"], ["What are LLMs"], ["What is machine learning?"], ["How to write a good resume?"]]
35
 
36
  additional_inputs = additional_inputs=[gr.Slider(label="Max new tokens",minimum=100,maximum=500,step=10,value=num_new_tokens),
37
  gr.Slider(label="Temperature",minimum=0.1,maximum=4.0,step=0.1,value=0.6)]