haryoaw commited on
Commit
0addc9d
·
1 Parent(s): e2c9a67

initial commit

Browse files
README.md CHANGED
@@ -1,3 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: mit
3
  ---
 
1
+ # Indonesia Recipe Ingredients Generator Model
2
+
3
+ 😎 *Have fun on generating ingredients* 😎
4
+
5
+ This is a fine-tuned model to generate Indonesian food ingredients. It is one of my personal projects that I built in my free time.
6
+
7
+ Basically, you give it the name of a food and it will produce that food's ingredients.
8
+
9
+ ## Model
10
+
11
+ Data: [Indonesian Recipe Data on Kaggle](https://www.kaggle.com/datasets/canggih/indonesian-food-recipes)
12
+ Pre-trained Model: [IndoBART-v2](https://huggingface.co/indobenchmark/indobart-v2)
13
+
14
+ ## How to use
15
+
16
+ Below we show how to load the tokenizer and the model.
17
+
18
+ ### Tokenizer
19
+
20
+ Since we use `indobart-v2`, we need to use their tokenizer.
21
+
22
+ First, install the tokenizer using `pip install indobenchmark-toolkit`.
23
+
24
+ After that, you can load the tokenizer:
25
+
26
+ ```python
27
+ from indobenchmark.tokenization_indonlg import IndoNLGTokenizer
28
+
29
+ tokenizer = IndoNLGTokenizer.from_pretrained("haryoaw/id-recigen-bart")
30
+ ```
31
+
32
+ ### Model
33
+
34
+ The model can be loaded using `AutoModelWithLMHead`.
35
+
36
+ ```python
37
+ from transformers import AutoModelWithLMHead
38
+
39
+ model = AutoModelWithLMHead.from_pretrained("haryoaw/id-recigen-bart")
40
+ ```
41
+
42
+
43
+ ## Example of input
44
+
45
+ ```
46
+ sayur asam
47
+ ```
48
+
49
+ ```
50
+ nasi goreng ayam
51
+ ```
52
+
53
+ ~To be continued
54
+
55
  ---
56
  license: mit
57
  ---
added_tokens.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"[java]": 40004, "[sunda]": 40005, "[indonesia]": 40006}
config.json ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "indobenchmark/indobart-v2",
3
+ "activation_dropout": 0.1,
4
+ "activation_function": "gelu",
5
+ "add_bias_logits": false,
6
+ "add_final_layer_norm": false,
7
+ "architectures": [
8
+ "MBartForConditionalGeneration"
9
+ ],
10
+ "attention_dropout": 0.1,
11
+ "bos_token_id": 0,
12
+ "classif_dropout": 0.1,
13
+ "classifier_dropout": 0.0,
14
+ "d_model": 768,
15
+ "decoder_attention_heads": 12,
16
+ "decoder_ffn_dim": 3072,
17
+ "decoder_layerdrop": 0.0,
18
+ "decoder_layers": 6,
19
+ "decoder_start_token_id": 2,
20
+ "dropout": 0.1,
21
+ "early_stopping": true,
22
+ "encoder_attention_heads": 12,
23
+ "encoder_ffn_dim": 3072,
24
+ "encoder_layerdrop": 0.0,
25
+ "encoder_layers": 6,
26
+ "eos_token_id": 2,
27
+ "forced_eos_token_id": 2,
28
+ "gradient_checkpointing": false,
29
+ "id2label": {
30
+ "0": "LABEL_0",
31
+ "1": "LABEL_1",
32
+ "2": "LABEL_2"
33
+ },
34
+ "init_std": 0.02,
35
+ "is_encoder_decoder": true,
36
+ "label2id": {
37
+ "LABEL_0": 0,
38
+ "LABEL_1": 1,
39
+ "LABEL_2": 2
40
+ },
41
+ "max_position_embeddings": 1024,
42
+ "model_type": "mbart",
43
+ "no_repeat_ngram_size": 3,
44
+ "normalize_before": false,
45
+ "normalize_embedding": true,
46
+ "num_beams": 4,
47
+ "num_hidden_layers": 6,
48
+ "pad_token_id": 1,
49
+ "scale_embedding": false,
50
+ "task_specific_params": {
51
+ "summarization": {
52
+ "length_penalty": 1.0,
53
+ "max_length": 128,
54
+ "min_length": 12,
55
+ "num_beams": 4
56
+ },
57
+ "summarization_cnn": {
58
+ "length_penalty": 2.0,
59
+ "max_length": 142,
60
+ "min_length": 56,
61
+ "num_beams": 4
62
+ },
63
+ "summarization_xsum": {
64
+ "length_penalty": 1.0,
65
+ "max_length": 62,
66
+ "min_length": 11,
67
+ "num_beams": 6
68
+ }
69
+ },
70
+ "tokenizer_class": "IndoNLGTokenizer",
71
+ "torch_dtype": "float32",
72
+ "transformers_version": "4.17.0",
73
+ "use_cache": true,
74
+ "vocab_size": 40004
75
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:41feaa6591f0dd254c9fc51cbc94ba87e173ee0cdea1b7232c6107a54a4b4c0a
3
+ size 526426993
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5508d2b2cf0a4a436783109db228742d2c8a1a70d94e3623a168e2b2b76b9cdf
3
+ size 931715
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}, "additional_special_tokens": ["[java]", "[sunda]", "[indonesia]", "<mask>"]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "additional_special_tokens": ["[java]", "[sunda]", "[indonesia]", "<mask>"], "model_max_length": 768, "special_tokens_map_file": "C:\\Users\\62852/.cache\\huggingface\\transformers\\926850b94af761a8e24968d04fc4a1cf7b49a5f2f729eb1e9eca9adb10f8e623.d1720ecf2a739704b6f50fd1a86f320a93102bdf215b7e9f5450e0e35e897c41", "name_or_path": "indobenchmark/indobart-v2", "tokenizer_class": "IndoNLGTokenizer"}