File size: 2,604 Bytes
2672696
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import pandas as pd
import xarray as xr
from glob import glob
from typing import Optional, List

def extract_region_id(filepath: str) -> str:
    """Extract a region identifier from a netCDF file's global attributes.

    Reads the ``original_id`` and ``ice_service`` attributes and derives the
    region ID from the underscore-separated ``original_id``:

    * ice service ``"dmi"``: the last two tokens joined with ``"_"``, with the
      file extension stripped from the final token;
    * any other ice service: the fourth token from the end.

    Parameters
    ----------
    filepath : str
        Path to a netCDF file readable by :func:`xarray.open_dataset`.

    Returns
    -------
    str
        The derived region identifier.
    """
    # Context manager guarantees the dataset handle is released even if
    # attribute access raises; the original only closed on the happy path.
    with xr.open_dataset(filepath) as ds:
        original_id = ds.attrs.get('original_id', '')
        ice_service = ds.attrs.get('ice_service', '')
    parts = original_id.split('_')
    if ice_service == "dmi":
        # e.g. "..._<region>_<stamp>.nc" -> "<region>_<stamp>"
        return parts[-2] + "_" + parts[-1].split('.')[0]
    # NOTE(review): assumes original_id always has >= 4 underscore-separated
    # tokens for non-dmi services; parts[-4] raises IndexError otherwise —
    # confirm against real file attributes.
    return parts[-4]

def load_split_data(splits: List[str]) -> pd.DataFrame:
    """Collect netCDF file paths from each split directory into one table.

    For every directory in *splits*, all ``*.nc`` files are listed and tagged
    with the split name, and the per-split tables are stacked. Three columns
    are then derived from the file path:

    * ``date`` — parsed from the ``YYYYMMDDThhmmss`` timestamp in the name,
    * ``ice_service`` — the ``dmi``/``cis`` token surrounded by underscores,
    * ``is_reference`` — whether the path contains the substring ``reference``.
    """
    frames = [
        pd.DataFrame({"path": glob(f"{split}/*.nc")}).assign(split=split)
        for split in splits
    ]
    combined = pd.concat(frames, ignore_index=True)

    timestamps = combined['path'].str.extract(r'(\d{8}T\d{6})')[0]
    combined['date'] = pd.to_datetime(timestamps, format='%Y%m%dT%H%M%S')
    combined['ice_service'] = combined['path'].str.extract(r'_(dmi|cis)_')[0]
    combined['is_reference'] = combined['path'].str.contains('reference')
    return combined

def process_test_data(test_data: pd.DataFrame) -> pd.DataFrame:
    """Pair each test-split input file with its reference file.

    Rows are grouped by ``(date, ice_service)``; within each group the first
    non-reference path becomes the input and the first reference path becomes
    the ground truth.

    Parameters
    ----------
    test_data : pd.DataFrame
        Must contain 'path', 'date', 'ice_service' and 'is_reference' columns.

    Returns
    -------
    pd.DataFrame
        Columns ``input_path, reference_path, date, ice_service, split``.
        The schema is stable even when *test_data* has no rows.

    Raises
    ------
    IndexError
        If a group lacks either an input or a reference file.
    """
    columns = ['input_path', 'reference_path', 'date', 'ice_service', 'split']
    test_pairs = []
    for (date, ice_service), group in test_data.groupby(['date', 'ice_service']):
        is_ref = group['is_reference']
        # .iloc[0] intentionally raises when a pairing is incomplete — a
        # half-paired test sample should fail loudly, not be silently dropped.
        test_pairs.append({
            'input_path': group.loc[~is_ref, 'path'].iloc[0],
            'reference_path': group.loc[is_ref, 'path'].iloc[0],
            'date': date,
            'ice_service': ice_service,
            'split': 'test',
        })
    # Passing columns= explicitly keeps the schema when test_pairs is empty;
    # pd.DataFrame([]) would otherwise have no columns and break downstream
    # column selection / concatenation.
    return pd.DataFrame(test_pairs, columns=columns)

def create_summary_df() -> pd.DataFrame:
    """Build the full sample summary across the train and test splits.

    Train rows keep their file as the input with no reference; test rows are
    paired input/reference files via :func:`process_test_data`. A
    ``region_id`` column is then derived from each input file's netCDF
    attributes.
    """
    df = load_split_data(["train", "test"])

    # Train samples: the file itself is the input; no reference exists.
    train_rows = df.loc[df['split'] == 'train'].copy()
    train_rows['input_path'] = train_rows['path']
    train_rows['reference_path'] = None

    # Test samples: each input is paired with its reference file.
    test_rows = process_test_data(df.loc[df['split'] == 'test'])

    wanted = ['input_path', 'reference_path', 'date', 'ice_service', 'split']
    summary_df = pd.concat([train_rows[wanted], test_rows])
    # Opens every input file once to read its region attribute.
    summary_df['region_id'] = summary_df['input_path'].apply(extract_region_id)
    return summary_df

def main():
    """Entry point: build the metadata summary and print it to stdout."""
    summary = create_summary_df()
    print("\nFinal Summary:")
    print(summary)


if __name__ == '__main__':
    main()