import io
import json
import os
from pathlib import Path

import pandas as pd
from datasets import Dataset, DatasetDict
from huggingface_hub import HfApi
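
# Upload a single Aria Digital Twin (ADT) sequence to the Hugging Face Hub:
# raw .vrs recordings are uploaded as plain repo files, while the CSV/JSON
# annotations are serialized to strings and pushed as a `datasets.Dataset`.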


def serialize_dataframe(df):
    """Convert a DataFrame to a CSV string."""
    buffer = io.StringIO()
    df.to_csv(buffer, index=False)
    return buffer.getvalue()
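
# The inverse for consumers is pd.read_csv(io.StringIO(csv_string)); the
# README generated below demonstrates this round-trip.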


def load_csv_safely(file_path):
    """Load a CSV file and serialize it to a string; return "" if missing or empty."""
    if os.path.exists(file_path) and os.path.getsize(file_path) > 0:
        df = pd.read_csv(file_path)
        return serialize_dataframe(df)
    return ""


def load_json_safely(file_path):
    """Load a JSON/JSONL file and serialize it to a string; return "" if missing or empty."""
    if os.path.exists(file_path) and os.path.getsize(file_path) > 0:
        with open(file_path, "r") as f:
            if file_path.endswith(".jsonl"):
                data = [json.loads(line) for line in f if line.strip()]
            else:
                try:
                    data = json.load(f)
                except json.JSONDecodeError:
                    # Fall back to line-delimited parsing for JSONL content
                    # stored under a .json extension.
                    f.seek(0)
                    data = [json.loads(line) for line in f if line.strip()]
        return json.dumps(data)
    return ""


def upload_sequence(sequence_path, sequence_name, repo_id="ariakang/ADT-test"):
    """Upload a single sequence to the Hugging Face Hub."""
    print(f"Starting upload process for sequence: {sequence_name}")

    api = HfApi()
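
    # The .vrs uploads below require the target repo to already exist. If it
    # might not, one option (a sketch using the standard huggingface_hub API):
    #   api.create_repo(repo_id=repo_id, repo_type="dataset", private=True, exist_ok=True)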

    print("Uploading VRS files...")
    vrs_files = list(Path(sequence_path).glob("*.vrs"))
    print("Found VRS files:", [f.name for f in vrs_files])

    vrs_info = []
    for vrs_file in vrs_files:
        print(f"Uploading {vrs_file.name}...")
        path_in_repo = f"sequences/{sequence_name}/vrs_files/{vrs_file.name}"

        try:
            api.upload_file(
                path_or_fileobj=str(vrs_file),
                path_in_repo=path_in_repo,
                repo_id=repo_id,
                repo_type="dataset",
            )
            print(f"Uploaded {vrs_file.name}")
            vrs_info.append(
                {
                    "filename": vrs_file.name,
                    "path": path_in_repo,
                    "size_bytes": vrs_file.stat().st_size,
                }
            )
        except Exception as e:
            print(f"Error uploading {vrs_file.name}: {e}")
            raise
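
    # Three parallel lists; `Dataset.from_dict` treats each as a column, so
    # every serialized file becomes one row of (data_type, data, filename).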
    sequence_data = {
        "data_type": [],
        "data": [],
        "filename": [],
    }

    csv_files = [
        "2d_bounding_box.csv",
        "3d_bounding_box.csv",
        "aria_trajectory.csv",
        "eyegaze.csv",
        "scene_objects.csv",
    ]

    for file in csv_files:
        file_path = os.path.join(sequence_path, file)
        data = load_csv_safely(file_path)
        if data:
            sequence_data["data_type"].append("csv")
            sequence_data["data"].append(data)
            sequence_data["filename"].append(file)
            print(f"Loaded {file}")

    json_files = ["instances.json", "metadata.json"]
    for file in json_files:
        file_path = os.path.join(sequence_path, file)
        data = load_json_safely(file_path)
        if data:
            sequence_data["data_type"].append("json")
            sequence_data["data"].append(data)
            sequence_data["filename"].append(file)
            print(f"Loaded {file}")

    # Machine Perception Services (MPS) outputs, if present.
    mps_path = os.path.join(sequence_path, "mps")
    if os.path.exists(mps_path):
        eye_gaze_path = os.path.join(mps_path, "eye_gaze")
        if os.path.exists(eye_gaze_path):
            data = load_csv_safely(os.path.join(eye_gaze_path, "general_eye_gaze.csv"))
            if data:
                sequence_data["data_type"].append("csv")
                sequence_data["data"].append(data)
                sequence_data["filename"].append("mps/eye_gaze/general_eye_gaze.csv")

            data = load_json_safely(os.path.join(eye_gaze_path, "summary.json"))
            if data:
                sequence_data["data_type"].append("json")
                sequence_data["data"].append(data)
                sequence_data["filename"].append("mps/eye_gaze/summary.json")

        slam_path = os.path.join(mps_path, "slam")
        if os.path.exists(slam_path):
            for file in ["closed_loop_trajectory.csv", "open_loop_trajectory.csv"]:
                data = load_csv_safely(os.path.join(slam_path, file))
                if data:
                    sequence_data["data_type"].append("csv")
                    sequence_data["data"].append(data)
                    sequence_data["filename"].append(f"mps/slam/{file}")

            data = load_json_safely(os.path.join(slam_path, "online_calibration.jsonl"))
            if data:
                sequence_data["data_type"].append("jsonl")
                sequence_data["data"].append(data)
                sequence_data["filename"].append("mps/slam/online_calibration.jsonl")

    # Record where the raw VRS files live in the repo so consumers can
    # locate them from the dataset itself.
    sequence_data["data_type"].append("vrs_info")
    sequence_data["data"].append(json.dumps(vrs_info))
    sequence_data["filename"].append("vrs_files_info.json")

    dataset_dict = DatasetDict({sequence_name: Dataset.from_dict(sequence_data)})

    print("\nPushing dataset to hub...")
    dataset_dict.push_to_hub(repo_id=repo_id, private=True)
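
    # Note: `push_to_hub` converts the in-memory dataset to Parquet shards
    # and creates the repo itself if it does not already exist.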

    readme_content = """---
language:
- en
license: mit
---

# ADT Dataset

## Dataset Description

This dataset contains Aria Digital Twin (ADT) sequences with various sensor data and annotations.

## Usage Example

```python
from datasets import load_dataset
import pandas as pd
import json
import io


def deserialize_csv(csv_string):
    return pd.read_csv(io.StringIO(csv_string))


def deserialize_json(json_string):
    return json.loads(json_string)


# Load the dataset
dataset = load_dataset("ariakang/ADT-test")
sequence = dataset["{sequence_name}"]

# Get the list of available files
files = list(zip(sequence["filename"], sequence["data_type"]))
print("Available files:", files)

# Deserialize each file according to its type
for filename, data_type, data in zip(
    sequence["filename"], sequence["data_type"], sequence["data"]
):
    if data_type == "csv":
        df = deserialize_csv(data)
        print(f"Loaded CSV {filename}: {len(df)} rows")
    elif data_type in ["json", "jsonl"]:
        json_data = deserialize_json(data)
        print(f"Loaded JSON {filename}")
    elif data_type == "vrs_info":
        vrs_info = deserialize_json(data)
        print(f"VRS files: {[f['filename'] for f in vrs_info]}")
```

## VRS Files

VRS files are stored in: sequences/{sequence_name}/vrs_files/
"""

    # Fill in the actual sequence name. str.replace is used rather than
    # str.format because the embedded code example contains literal braces.
    readme_content = readme_content.replace("{sequence_name}", sequence_name)

    api.upload_file(
        path_or_fileobj=readme_content.encode(),
        path_in_repo="README.md",
        repo_id=repo_id,
        repo_type="dataset",
    )

    return f"https://huggingface.co/datasets/{repo_id}"


if __name__ == "__main__":
    sequence_path = "/Users/ariak/Documents/projectaria_tools_adt_data/Apartment_release_clean_seq131_M1292"
    sequence_name = "Apartment_release_clean_seq131_M1292"
    repo_url = upload_sequence(sequence_path, sequence_name)
    print(f"Dataset uploaded successfully to: {repo_url}")