parquet-converter committed
Commit d083db9 · 1 parent: bb88e1a

Update parquet files

.gitattributes DELETED
@@ -1,37 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
 
README.md DELETED
@@ -1,20 +0,0 @@
- ---
- annotations_creators: []
- language:
- - code
- license:
- - other
- multilinguality:
- - monolingual
- size_categories:
- - unknown
- task_categories:
- - text-generation
- task_ids:
- - language-modeling
- pretty_name: github-jupyter-text-code-pairs
- ---
-
- This is a parsed version of [github-jupyter-parsed](https://huggingface.co/datasets/codeparrot/github-jupyter-parsed), with markdown and code pairs. We provide the preprocessing script in [preprocessing.py](https://huggingface.co/datasets/codeparrot/github-jupyter-parsed-v2/blob/main/preprocessing.py). The data is deduplicated and consists of 451,662 examples.
-
- For similar datasets with text and Python code, there is the [CoNaLa](https://huggingface.co/datasets/neulab/conala) benchmark from StackOverflow, with some samples curated by annotators.
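For reference, a minimal sketch of loading the published pairs with the datasets library; the repo id is taken from the push_to_hub call in preprocessing.py below:

from datasets import load_dataset

# Each example carries "markdown", "code", "path", "repo_name", and "license" fields.
pairs = load_dataset("codeparrot/github-jupyter-text-code-pairs", split="train")
print(pairs[0]["markdown"])
print(pairs[0]["code"])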
 
dataset_infos.json DELETED
@@ -1,52 +0,0 @@
- {"loubnabnl--github_jupyter_parsed_2": {
-     "description": "",
-     "citation": "",
-     "homepage": "",
-     "license": "",
-     "features": {
-         "markdown": {
-             "dtype": "string",
-             "id": null,
-             "_type": "Value"
-         },
-         "code": {
-             "dtype": "string",
-             "id": null,
-             "_type": "Value"
-         },
-         "path": {
-             "dtype": "string",
-             "id": null,
-             "_type": "Value"
-         },
-         "repo_name": {
-             "dtype": "string",
-             "id": null,
-             "_type": "Value"
-         },
-         "license": {
-             "dtype": "string",
-             "id": null,
-             "_type": "Value"
-         }
-     },
-     "post_processed": null,
-     "supervised_keys": null,
-     "task_templates": null,
-     "builder_name": null,
-     "config_name": null,
-     "version": null,
-     "splits": {
-         "train": {
-             "name": "train",
-             "num_bytes": 367027734.92262346,
-             "num_examples": 451662,
-             "dataset_name": "github_jupyter_parsed_2"
-         }
-     },
-     "download_checksums": null,
-     "download_size": 181325865,
-     "post_processing_size": null,
-     "dataset_size": 367027734.92262346,
-     "size_in_bytes": 548353599.9226234
- }}
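The "features" mapping deleted above corresponds one-to-one to an in-code schema; a minimal sketch of the equivalent datasets.Features object (all five columns are plain strings):

from datasets import Features, Value

# Equivalent of the "features" mapping recorded in dataset_infos.json.
features = Features({
    "markdown": Value("string"),
    "code": Value("string"),
    "path": Value("string"),
    "repo_name": Value("string"),
    "license": Value("string"),
})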
 
data/train-00000-of-00001-81d7ff8514320810.parquet → loubnabnl--github_jupyter_parsed_2/parquet-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4b32d3a702ea23a11ca1d18e903df7385d8e8181d6b7cde0de10832ffcbc4e1a
- size 181325865
+ oid sha256:5ec2af2ce3c13b2d3b1f8d7d82da6fd6b58434711366f3f562bd38a8e1609f23
+ size 185260303
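With the conversion in place, the train split can be read straight from this Parquet file. A sketch using huggingface_hub and pandas; the repo id loubnabnl/github_jupyter_parsed_2 is an assumption inferred from the directory prefix in the rename above, and refs/convert/parquet is the ref the parquet-converter bot typically writes to:

import pandas as pd
from huggingface_hub import hf_hub_download

# Assumed repo id, inferred from the "loubnabnl--github_jupyter_parsed_2" prefix above.
path = hf_hub_download(
    repo_id="loubnabnl/github_jupyter_parsed_2",
    filename="loubnabnl--github_jupyter_parsed_2/parquet-train.parquet",
    repo_type="dataset",
    revision="refs/convert/parquet",
)
df = pd.read_parquet(path)
print(df.shape)  # expected: (451662, 5), matching dataset_infos.json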
preprocessing.py DELETED
@@ -1,92 +0,0 @@
- from tqdm import tqdm
- from datasets import load_dataset, Dataset
-
- import hashlib
- import re
- import time
- from datasets import load_dataset
-
-
- PATTERN = re.compile(r"\s+")
-
- def parse_data(ds):
-     """Parse data into markdown-code pairs"""
-     markdowns = []
-     code_snippets = []
-     paths = []
-     repo_names = []
-     licenses = []
-     for i in tqdm(range(len(ds))):
-         inner_markdowns = []
-         inner_code_snippets = []
-         types = ds[i]["types"]
-         path = ds[i]["path"]
-         repo = ds[i]["repo_name"]
-         license = ds[i]["license"]
-         if types[0] == "code":
-             # drop the first code cell so the notebook starts with markdown
-             cells = ds[i]["cells"][1:]
-             types = types[1:]
-         else:
-             # drop the first two cells (markdown followed by code):
-             # the first markdown cell of a notebook is often a long description of the whole notebook
-             cells = ds[i]["cells"][2:]
-             types = ds[i]["types"][2:]
-         if len(cells) % 2 == 0:
-             inner_markdowns = [cells[j] for j in range(len(cells)) if j % 2 == 0]
-             inner_code_snippets = [cells[j+1] for j in range(len(cells) - 1) if j % 2 == 0]
-         else:
-             # delete the last markdown cell, which has no code cell after it
-             inner_markdowns = [cells[j] for j in range(len(cells) - 1) if j % 2 == 0]
-             inner_code_snippets = [cells[j+1] for j in range(len(cells) - 2) if j % 2 == 0]
-
-         markdowns.extend(inner_markdowns)
-         code_snippets.extend(inner_code_snippets)
-
-         paths.extend([path] * len(inner_markdowns))
-         repo_names.extend([repo] * len(inner_markdowns))
-         licenses.extend([license] * len(inner_markdowns))
-     return markdowns, code_snippets, paths, repo_names, licenses
-
-
- def get_hash(example):
-     """Get hash of markdown + code"""
-     text = example["markdown"] + example["code"]
-     return {"hash": hashlib.md5(re.sub(PATTERN, "", text).encode("utf-8")).hexdigest()}
-
- def preprocess(example):
-     """Add a hash column to the dataset."""
-     results = dict()
-     results.update(get_hash(example))
-     return results
-
- def check_uniques(example, uniques):
-     """Check if the current hash is still in the set of unique hashes; remove it and keep the example if so."""
-     if example["hash"] in uniques:
-         uniques.remove(example["hash"])
-         return True
-     else:
-         return False
-
- def filter(example, uniques):
-     if not check_uniques(example, uniques):
-         return False
-     else:
-         return True
-
- if __name__ == "__main__":
-     ds = load_dataset("codeparrot/github-jupyter-parsed", split="train")
-     print("Parsing data...")
-     markdowns, code_snippets, paths, repo_names, licenses = parse_data(ds)
-     data = {"markdown": markdowns, "code": code_snippets, "path": paths, "repo_name": repo_names, "license": licenses}
-     parsed_data = Dataset.from_dict(data)
-
-     print("Deduplication...")
-     parsed_data = parsed_data.map(preprocess)
-     # Deduplicate hashes
-     uniques = set(parsed_data.unique("hash"))
-     frac = len(uniques) / len(parsed_data)
-     print(f"Fraction of duplicates: {1-frac:.2%}")
-     ds_filter = parsed_data.filter(filter, fn_kwargs={"uniques": uniques})
-
-     ds_filter.push_to_hub("codeparrot/github-jupyter-text-code-pairs")
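The deduplication in the deleted script keys each pair on an MD5 hash of the markdown plus code with all whitespace stripped, keeping only the first occurrence of each hash. A self-contained toy run of the same scheme (the sample pairs are made up):

import hashlib
import re

PATTERN = re.compile(r"\s+")

def pair_hash(markdown, code):
    # Same scheme as get_hash above: strip all whitespace, then MD5.
    return hashlib.md5(PATTERN.sub("", markdown + code).encode("utf-8")).hexdigest()

pairs = [
    ("# load data", "import pandas as pd"),
    ("# load  data", "import pandas  as pd"),  # duplicate up to whitespace
    ("# plot it", "df.plot()"),
]

seen = set()
deduped = []
for markdown, code in pairs:
    h = pair_hash(markdown, code)
    if h not in seen:  # keep only the first occurrence of each hash
        seen.add(h)
        deduped.append((markdown, code))

print(len(deduped))  # 2: the whitespace-only variant is dropped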