anakin87 committed on
Commit 293f365 · verified · 1 Parent(s): f3c41a7

Update app.py

Files changed (1)
  1. app.py +5 -7
app.py CHANGED
@@ -8,14 +8,12 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
 DESCRIPTION = """\
-# Gemma 2 2B Neogenesis ITA 💎🌍🇮🇹
+# Gemma 2 9B Neogenesis ITA 💎🌍🇮🇹
 
-Fine-tuned version of Google/gemma-2-2b-it to improve the performance on the Italian language.
-Small (2.6 B parameters) but good model, with 8k context length.
+Fine-tuned version of VAGOsolutions/SauerkrautLM-gemma-2-9b-it to improve the performance on the Italian language.
+Good model with 9.24 billion parameters, with 8k context length.
 
-[🪪 **Model card**](https://huggingface.co/anakin87/gemma-2-2b-neogenesis-ita)
-
-[📓 **Kaggle notebook**](https://www.kaggle.com/code/anakin87/post-training-gemma-for-italian-and-beyond) - Learn how this model was trained.
+[🪪 **Model card**](https://huggingface.co/anakin87/gemma-2-9b-neogenesis-ita)
 """
 
 MAX_MAX_NEW_TOKENS = 2048
@@ -24,7 +22,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-model_id = "anakin87/gemma-2-2b-neogenesis-ita"
+model_id = "anakin87/gemma-2-9b-neogenesis-ita"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
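
For context, the lines touched by this commit sit inside a standard Transformers streaming setup (the file imports AutoModelForCausalLM, AutoTokenizer, and TextIteratorStreamer). Below is a minimal sketch of how the new `model_id` is typically loaded and used with a streamer; everything beyond the identifiers visible in the diff (the chat-template call, generation parameters, dtype, and the `generate_reply` helper) is an assumption, not a copy of the Space's actual app.py.

```python
# Minimal sketch (assumed, not the Space's actual app.py): load the updated
# model and stream a reply token by token with TextIteratorStreamer.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "anakin87/gemma-2-9b-neogenesis-ita"  # value introduced by this commit
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumed dtype; the diff truncates the remaining kwargs
).to(device)


def generate_reply(user_message: str, max_new_tokens: int = 256) -> str:
    """Hypothetical helper: stream a completion for a single user turn."""
    # Gemma 2 models are chat-tuned, so format the turn with the chat template.
    input_ids = tokenizer.apply_chat_template(
        [{"role": "user", "content": user_message}],
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(device)

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=0.7,
    )

    # generate() blocks, so run it in a background thread and consume the streamer here.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    reply = ""
    for new_text in streamer:
        reply += new_text
    thread.join()
    return reply


if __name__ == "__main__":
    print(generate_reply("Ciao! Presentati in una frase."))
```

In a Gradio ChatInterface, the same loop would `yield` the partial `reply` instead of returning it at the end, which is what the streamer-based pattern is for.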