Upload folder using huggingface_hub
- .gitattributes +12 -0
- README.md +47 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-IQ4_XS.gguf +3 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q2_K.gguf +3 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_L.gguf +3 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_M.gguf +3 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_S.gguf +3 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_M.gguf +3 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_S.gguf +3 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q5_K_M.gguf +3 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q5_K_S.gguf +3 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q6_K.gguf +3 -0
- Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q8_0.gguf +3 -0
- featherless-quants.png +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+featherless-quants.png filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,47 @@
+---
+base_model: Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24
+pipeline_tag: text-generation
+quantized_by: featherless-ai-quants
+---
+
+# Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24 GGUF Quantizations 🚀
+
+![Featherless AI Quants](./featherless-quants.png)
+
+*Optimized GGUF quantization files for enhanced model performance*
+
+> Powered by [Featherless AI](https://featherless.ai) - run any model you'd like for a simple small fee.
+---
+
+## Available Quantizations 📊
+
+| Quantization Type | File | Size |
+|-------------------|------|------|
+| IQ4_XS | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-IQ4_XS.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-IQ4_XS.gguf) | 6485.05 MB |
+| Q2_K | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q2_K.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q2_K.gguf) | 4569.11 MB |
+| Q3_K_L | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_L.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_L.gguf) | 6257.55 MB |
+| Q3_K_M | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_M.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_M.gguf) | 5801.30 MB |
+| Q3_K_S | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_S.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_S.gguf) | 5277.86 MB |
+| Q4_K_M | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_M.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_M.gguf) | 7130.83 MB |
+| Q4_K_S | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_S.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_S.gguf) | 6790.36 MB |
+| Q5_K_M | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q5_K_M.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q5_K_M.gguf) | 8323.33 MB |
+| Q5_K_S | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q5_K_S.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q5_K_S.gguf) | 8124.11 MB |
+| Q6_K | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q6_K.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q6_K.gguf) | 9590.37 MB |
+| Q8_0 | [Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q8_0.gguf](https://huggingface.co/featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF/blob/main/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q8_0.gguf) | 12419.12 MB |
+
+
+---
+
+## ⚡ Powered by [Featherless AI](https://featherless.ai)
+
+### Key Features
+
+- 🔥 **Instant Hosting** - Deploy any Llama model on HuggingFace instantly
+- 🛠️ **Zero Infrastructure** - No server setup or maintenance required
+- 📚 **Vast Compatibility** - Support for 2400+ models and counting
+- 💎 **Affordable Pricing** - Starting at just $10/month
+
+---
+
+**Links:**
+[Get Started](https://featherless.ai) | [Documentation](https://featherless.ai/docs) | [Models](https://featherless.ai/models)
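As a usage note (not part of the uploaded README), the files listed in the table above can be fetched programmatically with the same `huggingface_hub` library used for this upload. Below is a minimal sketch, assuming the repository id `featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF` taken from the table links; the Q4_K_M file is only an example, any filename from the table works the same way.

```python
# Hedged sketch: download one of the GGUF quantizations listed in the README table.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="featherless-ai-quants/Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-GGUF",
    filename="Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_M.gguf",
)
print(local_path)  # pass this path to any GGUF-capable runtime (e.g. llama.cpp)
```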
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-IQ4_XS.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d16433ac3001756fd02e8ca507cb5d7e300a91eac6f9a09bbc7d06602cbd51b
+size 6800067648
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q2_K.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4017ff4df02f791d3c13724ed577ba61a8d80d0beb1a61b23e8297fb3ca0a602
+size 4791059488
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_L.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8985cc2d804966558fb15b24171e4f4b4d580a4a9b0a14d431b4ba0a59396e76
+size 6561515584
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_M.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bc40fff8d2835c51cb78e4b8f195ddce8a1bc8150116982a8f1c67333bbdc6c
+size 6083102784
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q3_K_S.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5cf9007e94e73d9ed41371734ea283687a0617b74951648728d2c54cc57d34bc
+size 5534238784
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_M.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da419dac2348261729891435828fa93c452ba40d25b855d8ea362a9fb0c397ae
+size 7477218688
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q4_K_S.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa6054bfa1bef490282d3eaad9503a8a8106020e6c33cdc1ee6c66313e554717
+size 7120211328
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q5_K_M.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2b6782e17b22bea4c37a5f66ecd56a67c45df3c49339e0b8bab1288786f2e5c
+size 8727646848
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q5_K_S.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e416d005d96a1e420d48d1a5791fd420f8a703a9091ac8ada5f2cf59a790d0ca
+size 8518750848
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q6_K.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f9eccdb5b9848cd389ee2052a12e09704cea09ed85a6e46f9ab5222cb2b8242
+size 10056226784
Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-Q8_0.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd64eb222aaf18c5419d07ae32e1c058d9f898f58d744a410170d2d7e7d6d290
+size 13022391072
featherless-quants.png
ADDED
(binary image, stored with Git LFS)
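Each of the large files added in this commit is stored as a Git LFS pointer: three lines giving the LFS spec version, the SHA-256 object id, and the payload size in bytes. As an illustration (not part of the commit), a downloaded file can be checked against its pointer; the oid and size below are copied from the IQ4_XS pointer above, and the filename in the usage comment is only the expected local name.

```python
# Hedged sketch: verify a downloaded GGUF file against its Git LFS pointer.
import hashlib
from pathlib import Path

EXPECTED_OID = "2d16433ac3001756fd02e8ca507cb5d7e300a91eac6f9a09bbc7d06602cbd51b"
EXPECTED_SIZE = 6800067648  # bytes, from the IQ4_XS pointer

def verify(path: str) -> bool:
    p = Path(path)
    if p.stat().st_size != EXPECTED_SIZE:
        return False
    h = hashlib.sha256()
    with p.open("rb") as f:
        # Hash in chunks so the ~6.8 GB file is never loaded into memory at once.
        for chunk in iter(lambda: f.read(8 * 1024 * 1024), b""):
            h.update(chunk)
    return h.hexdigest() == EXPECTED_OID

# Example usage:
# print(verify("Vikhrmodels-Vikhr-Nemo-12B-Instruct-R-21-09-24-IQ4_XS.gguf"))
```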