prince-canuma committed on
Commit
7f17ac5
·
verified ·
1 Parent(s): 276b570

Upload folder using huggingface_hub

Browse files
README.md CHANGED
@@ -13,7 +13,7 @@ tags:
13
  ---
14
 
15
  # mlx-community/gemma-3-27b-it-4bit
16
- This model was converted to MLX format from [`google/gemma-3-27b-it`]() using mlx-vlm version **0.1.17**.
17
  Refer to the [original model card](https://huggingface.co/google/gemma-3-27b-it) for more details on the model.
18
  ## Use with mlx
19
 
 
13
  ---
14
 
15
  # mlx-community/gemma-3-27b-it-4bit
16
+ This model was converted to MLX format from [`google/gemma-3-27b-it`]() using mlx-vlm version **0.1.18**.
17
  Refer to the [original model card](https://huggingface.co/google/gemma-3-27b-it) for more details on the model.
18
  ## Use with mlx
19
 
chat_template.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"
3
+ }
generation_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 2,
4
+ "cache_implementation": "hybrid",
5
+ "eos_token_id": [
6
+ 1,
7
+ 106
8
+ ],
9
+ "pad_token_id": 0,
10
+ "transformers_version": "4.50.0.dev0"
11
+ }
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ebb791d067c2c19d6963826a5ef44bdfa991dec1a7ad8ec34acb58471f8f0c34
3
- size 5368126106
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7245a86f204f747ef70cfd9a97f4cc43154c9d069bac728f5d15fcc5b40402fe
3
+ size 5368126871
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:80e08ae92351ce986c0dd799fde59b899b5099394fd0d91f88df2ce286b6131a
3
- size 5355062194
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aac2c9ae014660962187dafc41a12bb3149740e33714629ca24a56f172c01f3e
3
+ size 5355062658
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9a8256d000f817c18bf52f229070fa01eceaec4bdd48caaa130a1b1616d1bc40
3
- size 5305524378
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af2a4afc9e2b7889ca481e778eb955ed1d00c21f6d8e82afaa082455d7a207d4
3
+ size 5305524829
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6d03f8ba00c87b8b6120a3f8241f4a1207ea12b0c16821638f2b83fce81b9342
3
- size 805306175
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b2559198db43d5822b4ddb74c2608ba1fef78ab3cba4f9975057396e5e4198e
3
+ size 805306179
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff