Commit dd33dbb, committed by lysandre (HF Staff)
Parent(s): eda22ed

Upload dataset_infos.json with huggingface_hub

Files changed (1):
  1. dataset_infos.json +45 -45
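The commit message says the file was pushed programmatically with huggingface_hub. A minimal sketch of how such an upload is typically done with `HfApi.upload_file`; the `repo_id` below is a placeholder, since the target repository is not named on this page:

```python
from huggingface_hub import HfApi

api = HfApi()

# Push the regenerated metadata file to the dataset repository.
# repo_id is a placeholder; the actual repo is not shown in this commit view.
api.upload_file(
    path_or_fileobj="dataset_infos.json",
    path_in_repo="dataset_infos.json",
    repo_id="<namespace>/<dataset-name>",
    repo_type="dataset",
    commit_message="Upload dataset_infos.json with huggingface_hub",
)
```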
dataset_infos.json CHANGED
@@ -16,78 +16,78 @@
   "splits": {
     "datasets": {
       "name": "datasets",
-      "num_bytes": 18920,
-      "num_examples": 860,
+      "num_bytes": 19250,
+      "num_examples": 875,
       "dataset_name": null
     },
-    "accelerate": {
-      "name": "accelerate",
-      "num_bytes": 18920,
-      "num_examples": 860,
+    "optimum": {
+      "name": "optimum",
+      "num_bytes": 14234,
+      "num_examples": 647,
       "dataset_name": null
     },
-    "safetensors": {
-      "name": "safetensors",
-      "num_bytes": 4356,
-      "num_examples": 198,
+    "pytorch_image_models": {
+      "name": "pytorch_image_models",
+      "num_bytes": 22616,
+      "num_examples": 1028,
       "dataset_name": null
     },
     "transformers": {
       "name": "transformers",
-      "num_bytes": 22968,
-      "num_examples": 1044,
+      "num_bytes": 23298,
+      "num_examples": 1059,
       "dataset_name": null
     },
-    "pytorch_image_models": {
-      "name": "pytorch_image_models",
-      "num_bytes": 22286,
-      "num_examples": 1013,
+    "diffusers": {
+      "name": "diffusers",
+      "num_bytes": 8624,
+      "num_examples": 392,
       "dataset_name": null
     },
-    "evaluate": {
-      "name": "evaluate",
-      "num_bytes": 10890,
-      "num_examples": 495,
+    "huggingface_hub": {
+      "name": "huggingface_hub",
+      "num_bytes": 20130,
+      "num_examples": 915,
       "dataset_name": null
     },
-    "gradio": {
-      "name": "gradio",
-      "num_bytes": 22286,
-      "num_examples": 1013,
+    "safetensors": {
+      "name": "safetensors",
+      "num_bytes": 4686,
+      "num_examples": 213,
       "dataset_name": null
     },
     "peft": {
       "name": "peft",
-      "num_bytes": 3146,
-      "num_examples": 143,
+      "num_bytes": 3476,
+      "num_examples": 158,
       "dataset_name": null
     },
-    "huggingface_hub": {
-      "name": "huggingface_hub",
-      "num_bytes": 19800,
-      "num_examples": 900,
+    "evaluate": {
+      "name": "evaluate",
+      "num_bytes": 11220,
+      "num_examples": 510,
       "dataset_name": null
     },
-    "diffusers": {
-      "name": "diffusers",
-      "num_bytes": 8294,
-      "num_examples": 377,
+    "gradio": {
+      "name": "gradio",
+      "num_bytes": 22616,
+      "num_examples": 1028,
       "dataset_name": null
     },
-    "tokenizers": {
-      "name": "tokenizers",
-      "num_bytes": 22286,
-      "num_examples": 1013,
+    "accelerate": {
+      "name": "accelerate",
+      "num_bytes": 19250,
+      "num_examples": 875,
       "dataset_name": null
     },
-    "optimum": {
-      "name": "optimum",
-      "num_bytes": 13904,
-      "num_examples": 632,
+    "tokenizers": {
+      "name": "tokenizers",
+      "num_bytes": 22616,
+      "num_examples": 1028,
       "dataset_name": null
     }
   },
-  "download_size": 111786,
-  "dataset_size": 188056,
-  "size_in_bytes": 299842
+  "download_size": 113859,
+  "dataset_size": 192016,
+  "size_in_bytes": 305875
 }}
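For readers unfamiliar with this metadata file: each split entry records its serialized size ("num_bytes") and row count ("num_examples"), with repo-level totals in "download_size" and "dataset_size". A rough sketch of inspecting those numbers, assuming the file is read locally as plain JSON (the top level maps config names to their info dicts):

```python
import json

# Load the metadata file updated by this commit.
with open("dataset_infos.json") as f:
    infos = json.load(f)

# Print per-split sizes and the repo-level totals, as shown in the diff above.
for config_name, info in infos.items():
    for split_name, split in info["splits"].items():
        print(f"{config_name}/{split_name}: "
              f"{split['num_examples']} examples, {split['num_bytes']} bytes")
    print(f"dataset_size: {info['dataset_size']} bytes, "
          f"download_size: {info['download_size']} bytes")
```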