marksaroufim committed on
Commit 087591e · 1 Parent(s): b54c1ac
Files changed (2):
  1. output_dataset.parquet +1 -1
  2. par.py +19 -18
output_dataset.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1a670b3dc111fe3c5c2522545ba62b74df0da6beb2b1d413ba574f983728ebdf
+oid sha256:c49abfa2591aac4b75847bb0f31e4ed5581eb94727308af9c5c6c31657d4946c
 size 22475137
par.py CHANGED
@@ -8,44 +8,45 @@ import base64
 
 def encode_file(file_path):
     """Encode text files or base64 encode image files."""
-    try:
-        if file_path.endswith('.jpg'):
-            with open(file_path, "rb") as image_file:
-                return base64.b64encode(image_file.read()).decode('utf-8')
-        else:
+    if file_path.endswith('.jpg'):
+        with open(file_path, "rb") as image_file:
+            return base64.b64encode(image_file.read()).decode('utf-8')
+    else:
+        try:
             with open(file_path, 'r', encoding='utf-8') as file:
                 return file.read()
-    except UnicodeDecodeError as e:
-        print(f"Error decoding file {file_path}: {e}")
-        return None
+        except UnicodeDecodeError as e:
+            print(f"Error decoding file {file_path}: {e}")
+            return None
 
 def extract_images(markdown_content):
     """Extract PHOTO_IDs from markdown files and return as a list."""
     return re.findall(r'\{\{PHOTO_ID:(\d+)\|WIDTH:\d+\}\}', markdown_content)
 
 def collect_data(directory):
-    data = []
+    data = {}
     image_files = {re.search(r'(\d+)', filename).group(1): filename
                    for filename in os.listdir(directory) if filename.endswith('.jpg')}
 
+    # Iterate over all files in the directory and organize them by problem ID
     for filename in os.listdir(directory):
         problem_id = filename.split('.')[0]
-        if not any(d['Problem ID'] == problem_id for d in data):
-            data.append({'Problem ID': problem_id, 'Images': [], 'in': None, 'out': None, 'cpp': None, 'md': None, 'sol.md': None})
-
-        current_entry = next(item for item in data if item['Problem ID'] == problem_id)
         file_type = filename.split('.')[-1]
         file_path = os.path.join(directory, filename)
+
+        if problem_id not in data:
+            data[problem_id] = {'Problem ID': problem_id, 'Images': [], 'in': None, 'out': None, 'cpp': None, 'md': None, 'sol.md': None}
+
         if file_type in ['in', 'out', 'cpp', 'md', 'sol.md']:
             content = encode_file(file_path)
-            if content is not None:  # Make sure we successfully read the file
-                current_entry[file_type] = content
+            if content is not None:
+                data[problem_id][file_type] = content
             if file_type in ['md', 'sol.md']:
                 image_ids = extract_images(content)
-                current_entry['Images'] += [image_files[id] for id in image_ids if id in image_files]
-                current_entry['Images'] = list(set(current_entry['Images']))  # Remove duplicates if any
+                data[problem_id]['Images'] += [image_files[id] for id in image_ids if id in image_files]
+                data[problem_id]['Images'] = list(set(data[problem_id]['Images']))  # Remove duplicates
 
-    return data
+    return list(data.values())
 
 def create_parquet_file(data, output_file):
     df = pd.DataFrame(data)
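
The substantive change in collect_data is swapping the list accumulator (searched linearly with next(...) for every file) for a dict keyed by problem ID, returning list(data.values()) at the end. A minimal, self-contained sketch of that aggregation pattern, with made-up filenames that are not taken from the dataset:

    # Sketch of the dict-keyed aggregation collect_data now uses.
    # Filenames are hypothetical; the real script walks a directory on disk.
    filenames = ["101.in", "101.out", "101.md", "102.cpp", "102.md"]

    data = {}
    for filename in filenames:
        problem_id = filename.split('.')[0]
        file_type = filename.split('.')[-1]
        # Create the per-problem entry lazily instead of scanning a list for it.
        if problem_id not in data:
            data[problem_id] = {'Problem ID': problem_id, 'Images': []}
        data[problem_id][file_type] = f"<contents of {filename}>"

    rows = list(data.values())
    print([row['Problem ID'] for row in rows])  # ['101', '102']

Because dicts preserve insertion order, the resulting rows come out in the order problems were first seen, and each lookup is O(1) instead of a scan over all problems collected so far.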