Votee committed
Commit 886c9d6 · 1 Parent(s): aeaee34

upgrade to use hon9kon9ize/Cantonese-Llama-2-7B-preview20240903

Files changed (2)
  1. README.md +4 -4
  2. app.py +3 -3
README.md CHANGED
@@ -1,13 +1,13 @@
 ---
-title: Cantonese-Llama-2-7B-preview20240625
+title: Cantonese-Llama-2-7B-Preview20240903
 emoji: 🦙
 colorFrom: yellow
 colorTo: pink
 sdk: gradio
-sdk_version: 4.37.2
+sdk_version: 4.42.0
 app_file: app.py
 pinned: false
-license: apache-2.0
+license: cc-by-sa-4.0
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -12,7 +12,7 @@ DEFAULT_MAX_NEW_TOKENS = 2048
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-# hon9kon9ize/Cantonese-Llama-2-7B-preview20240625
+# hon9kon9ize/Cantonese-Llama-2-7B-preview20240903
 
 Please join our [Discord server](https://discord.gg/gG6GPp8XxQ) and give me your feedback
 """
@@ -23,7 +23,7 @@ if not torch.cuda.is_available():
 
 
 if torch.cuda.is_available():
-    model_id = "hon9kon9ize/Cantonese-Llama-2-7B-preview20240625"
+    model_id = "hon9kon9ize/Cantonese-Llama-2-7B-preview20240903"
     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
     model = torch.compile(model)
     tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -124,4 +124,4 @@ with gr.Blocks() as demo:
     chat_interface.render()
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
+    demo.queue(max_size=20).launch()
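
For reference, a minimal sketch (not part of this commit) of loading the updated checkpoint outside the Space, assuming a CUDA device and the same transformers APIs the app already uses; the chat-template call and the example prompt are illustrative assumptions, not the model's documented usage.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the checkpoint name this commit switches to.
model_id = "hon9kon9ize/Cantonese-Llama-2-7B-preview20240903"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # assumption: half precision fits on a single GPU
    device_map="auto",
)

# Assumption: the tokenizer ships a chat template; otherwise build the prompt by hand.
messages = [{"role": "user", "content": "你好，可唔可以用廣東話介紹一下香港？"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=256, do_sample=True, temperature=0.7)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))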