prince-canuma committed on
Commit
3eb1768
·
verified ·
1 Parent(s): 5d95ed7

Upload folder using huggingface_hub

Browse files
README.md CHANGED
@@ -13,7 +13,7 @@ tags:
13
  ---
14
 
15
  # mlx-community/gemma-3-12b-it-8bit
16
- This model was converted to MLX format from [`google/gemma-3-12b-it`]() using mlx-vlm version **0.1.17**.
17
  Refer to the [original model card](https://huggingface.co/google/gemma-3-12b-it) for more details on the model.
18
  ## Use with mlx
19
 
 
13
  ---
14
 
15
  # mlx-community/gemma-3-12b-it-8bit
16
+ This model was converted to MLX format from [`google/gemma-3-12b-it`]() using mlx-vlm version **0.1.18**.
17
  Refer to the [original model card](https://huggingface.co/google/gemma-3-12b-it) for more details on the model.
18
  ## Use with mlx
19
 
chat_template.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"
3
+ }
generation_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 2,
4
+ "cache_implementation": "hybrid",
5
+ "eos_token_id": [
6
+ 1,
7
+ 106
8
+ ],
9
+ "pad_token_id": 0,
10
+ "transformers_version": "4.50.0.dev0"
11
+ }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b618ba98316aa58f52671b7d26755eb5b6f57c88cb592b08fb19e9350d1adbf9
3
- size 5350881161
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20d979d00060ff7f67a5804d23060834dec3fdd89c31fb199bcae7a5a1d73e13
3
+ size 5350881892
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2235ff3821e72e2b2241bcd906b9dacc36036e94f9a7466a372888ad918e0804
3
- size 5365256334
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d57055b0250e7ceffd60b92a25bf45204a7fc4104ec3c57853b1faa7d8bde8a4
3
+ size 5365256782
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b820396cc7d0c7bcfa35d08fcbc2d92892f5c3024917cec866a04e5380d08631
3
- size 3698609919
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d49948e464cafc1539e8a6dd3292ec7599a7a4f1c1090c74119c9dc29fcda048
3
+ size 3698610144
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff
 
processor_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "image_seq_length": 256,
3
+ "processor_class": "Gemma3Processor"
4
+ }