import os
import subprocess
import pandas as pd
from datasets import Dataset

def remove_repo(path):
    """Delete a previously downloaded project directory."""
    subprocess.call(f'rm -rf {path}', shell=True)

def download_git_or_zip(url: str, target_folder: str) -> None:
    """
    Download one source into target_folder: clone it when the URL points to
    GitHub, otherwise fetch the zip archive, unpack it, and delete the archive.
    """
    if url.startswith("https://github.com"):
        subprocess.call(f"git clone {url}", cwd=target_folder, shell=True)
    else:
        subprocess.call(f"wget {url}", cwd=target_folder, shell=True)
        zip_name = url.split('/')[-1]
        subprocess.call(f"unzip {zip_name}", cwd=target_folder, shell=True)
        subprocess.call(f"rm -rf {zip_name}", cwd=target_folder, shell=True)

class data_generator:
    def __init__(self):
        self.dataset_columns = ["repo_name", "file_path", "content"]
        self.important_extension = ['.c', '.cpp', '.cxx', '.cc', '.cp', '.CPP', '.c++', '.h', '.hpp']
        self.projects_path = "data/projects"
        self.data_path = "data/opensource_dataset.csv"
        
        targets = [
            ['Framework', 'fprime', "https://github.com/nasa/fprime"],
            ['comm', 'asio', "https://github.com/boostorg/asio"],
            ['parsing', 'tinyxml2', "https://github.com/leethomason/tinyxml2"],
            ['parsing', 'inifile-cpp', "https://github.com/Rookfighter/inifile-cpp"],
            ['numerical analysis', 'oneAPI-samples', "https://github.com/oneapi-src/oneAPI-samples"],
            ['comm', 'rticonnextdds-examples', "https://d2vkrkwbbxbylk.cloudfront.net/sites/default/files/rti-examples/bundles/rticonnextdds-examples/rticonnextdds-examples.zip"],
            ['comm', 'rticonnextdds-robot-helpers', "https://github.com/rticommunity/rticonnextdds-robot-helpers"],
            ['comm', 'rticonnextdds-getting-started', "https://github.com/rticommunity/rticonnextdds-getting-started"],
            ['comm', 'rticonnextdds-usecases', "https://github.com/rticommunity/rticonnextdds-usecases"],
            ['xyz', 'PROJ', "https://github.com/OSGeo/PROJ"],
        ]
        self.targets = pd.DataFrame(targets, columns=('category', 'target_lib', 'data_source'))

        os.makedirs(self.projects_path, exist_ok=True)

    def process_file(self, project_name: str, dir_name: str, file_path: str) -> dict:
        """Read a single source file and return one dataset row."""
        try:
            with open(file_path, "r", encoding="utf-8") as file:
                content = file.read()
                # Drop auto-generated sources. strip() already removes leading
                # newlines, so a single prefix check covers both cases.
                if content.strip().startswith('/*\nWARNING: THIS FILE IS AUTO-GENERATED'):
                    content = ""
        except Exception:
            # Unreadable or non-UTF-8 file: keep an empty content field.
            content = ""

        return {
            "repo_name": project_name.replace('/','_'),
            "file_path": file_path,
            "content": content,
        }
    
    def read_repository_files(self, project_name: str) -> pd.DataFrame:
        """Walk one downloaded project and collect its C/C++ source files.

        Returns a DataFrame with the columns in self.dataset_columns; when no
        usable file is found, a single placeholder row is returned instead.
        """
        file_paths = []
        pwd = os.path.join(self.projects_path, project_name)
        for root, _, files in os.walk(pwd):
            for file in files:
                file_path = os.path.join(root, file)
                if file.endswith(tuple(self.important_extension)):
                    file_paths.append((os.path.dirname(root), file_path))

        print("#"*10, f"{project_name} Total file paths: {len(file_paths)}", "#"*10)

        rows = []
        for dir_name, file_path in file_paths:
            file_content = self.process_file(project_name, dir_name, file_path)
            assert isinstance(file_content, dict)
            if file_content["content"] != "":
                rows.append(file_content)

        if not rows:
            # Keep one empty row so the project still appears in the dataset.
            rows = [{
                "repo_name": project_name,
                "file_path": "",
                "content": "",
            }]

        repo_df = pd.DataFrame(rows, columns=self.dataset_columns)
        assert isinstance(repo_df, pd.DataFrame)
        return repo_df