Upload folder using huggingface_hub

#40
by sharpenb - opened
Files changed (3)
  1. config.json +1 -1
  2. model.safetensors +1 -1
  3. smash_config.json +1 -1
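
Per the PR title, these files were pushed with huggingface_hub's `upload_folder` utility. A minimal sketch of such an upload, assuming a hypothetical repo id and local path (neither the target repo nor the source folder is shown in this diff):

```python
from huggingface_hub import upload_folder

# Upload every file in a local folder as a single commit.
# repo_id and folder_path are placeholders, not values from this PR.
upload_folder(
    repo_id="username/model-id",   # hypothetical repo id
    folder_path="./smashed-model",  # hypothetical local path
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
    create_pr=True,  # open a pull request like this one instead of pushing to main
)
```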
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmpa_kbe1kbm0r5ky3m",
+  "_name_or_path": "/covalent/.cache/models/tmpb62i5w_8nnme5cvh",
   "activation_function": "gelu_new",
   "all_reduce_scores": {
     "0": "SUCCESS",
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c32afbdac543c395aceb24b02efb438ba6193f518176db23f1c6f517b3c9851e
+oid sha256:0b855b887cd958d16fedfce0f6371c6eb0b9f83c3cb6a0967fa5b6d47d5f00ea
 size 2306069384
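
model.safetensors is tracked with Git LFS, so the diff only touches the pointer file: the sha256 oid changes while the payload size stays 2306069384 bytes. A minimal sketch for checking a downloaded copy against the new pointer's oid, assuming the file sits in the working directory:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so multi-GB weights need not fit in RAM."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid taken from the new LFS pointer in this PR.
expected = "0b855b887cd958d16fedfce0f6371c6eb0b9f83c3cb6a0967fa5b6d47d5f00ea"
assert sha256_of("model.safetensors") == expected
```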
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmpa_kbe1kb",
+  "cache_dir": "/covalent/.cache/models/tmpb62i5w_8",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}