python_code (string, 0-679k) | repo_name (string, 9-41) | file_path (string, 6-149)
---|---|---
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API handler modules"""
import os
import sys
import json
import uuid
import glob
import shutil
import threading
import datetime
from handlers.automl_handler import AutoMLHandler
from handlers import stateless_handlers
from handlers.stateless_handlers import check_read_access, check_write_access
from handlers.utilities import Code, read_network_config, run_system_command, download_ptm, VALID_DSTYPES, VALID_NETWORKS, AUTOML_DISABLED_NETWORKS
from handlers.chaining import create_job_contexts, infer_action_from_job
from handlers.ds_upload import DS_UPLOAD_TO_FUNCTIONS
from job_utils import executor as jobDriver
from job_utils.workflow_driver import on_new_job, on_delete_job
from specs_utils import csv_to_json_schema
# Helpers
def resolve_existence(user_id, kind, handler_id):
"""Return whether metadata.json exists or not"""
if kind not in ["dataset", "model"]:
return False
metadata_path = os.path.join(stateless_handlers.get_root(), user_id, kind + "s", handler_id, "metadata.json")
return os.path.exists(metadata_path)
def resolve_job_existence(user_id, kind, handler_id, job_id):
"""Return whether job_id.json exists in jobs_metadata folder or not"""
if kind not in ["dataset", "model"]:
return False
metadata_path = os.path.join(stateless_handlers.get_root(), user_id, kind + "s", handler_id, "jobs_metadata", job_id + ".json")
return os.path.exists(metadata_path)
def resolve_root(user_id, kind, handler_id):
"""Returns handler root"""
return os.path.join(stateless_handlers.get_root(), user_id, kind + "s", handler_id)
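# Illustrative layout (hypothetical IDs): resolve_root("u-123", "model", "m-456")
# -> <root>/u-123/models/m-456, the directory that holds metadata.json plus the
# logs, specs and jobs_metadata folders created by make_root_dirs().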
def resolve_metadata(user_id, kind, handler_id):
"""Reads metadata.json and return it's contents"""
metadata_path = os.path.join(resolve_root(user_id, kind, handler_id), "metadata.json")
if os.path.exists(metadata_path):
with open(metadata_path, "r", encoding='utf-8') as f:
metadata = json.load(f)
return metadata
return {}
def resolve_metadata_with_jobs(user_id, kind, handler_id):
"""Reads job_id.json in jobs_metadata folder and return it's contents"""
metadata = resolve_metadata(user_id, kind, handler_id)
if metadata:
metadata["jobs"] = []
job_metadatas_root = resolve_root(user_id, kind, handler_id) + "/jobs_metadata/"
for json_file in glob.glob(job_metadatas_root + "*.json"):
metadata["jobs"].append(stateless_handlers.load_json_data(json_file))
return metadata
return {}
def get_user_models(user_id):
"""Returns a list of models that are available for the given user_id"""
user_root = stateless_handlers.get_root() + f"{user_id}/models/"
models, ret_lst = [], []
if os.path.isdir(user_root):
models = os.listdir(user_root)
for model in models:
if resolve_existence(user_id, "model", model):
ret_lst.append(model)
return ret_lst
def get_user_datasets(user_id):
"""Returns a list of datasets that are available for the given user_id"""
user_root = stateless_handlers.get_root() + f"{user_id}/datasets/"
datasets, ret_lst = [], []
if os.path.isdir(user_root):
datasets = os.listdir(user_root)
for dataset in datasets:
if resolve_existence(user_id, "dataset", dataset):
ret_lst.append(dataset)
return ret_lst
def write_handler_metadata(user_id, kind, handler_id, metadata):
"""Writes metadata.json with the contents of metadata variable passed"""
metadata_file = os.path.join(resolve_root(user_id, kind, handler_id), "metadata.json")
with open(metadata_file, "w", encoding='utf-8') as f:
f.write(json.dumps(metadata, indent=4))
def get_dataset_actions(ds_type, ds_format):
"""Reads the dataset's network config and returns the valid actions of the given dataset type and format"""
actions_default = read_network_config(ds_type)["api_params"]["actions"]
# Define all anomalous formats where the actions are not the same as the ones listed in the network config
TYPE_FORMAT_ACTIONS_MAP = {("object_detection", "raw"): [],
("object_detection", "coco_raw"): [],
("instance_segmentation", "raw"): [],
("instance_segmentation", "coco_raw"): [],
("semantic_segmentation", "raw"): [],
("semantic_segmentation", "unet"): []
}
actions_override = TYPE_FORMAT_ACTIONS_MAP.get((ds_type, ds_format), actions_default)
return actions_override
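# Example (illustrative): for ("object_detection", "raw") the override map above
# yields [], i.e. no actions; any (type, format) pair not listed falls back to
# the actions declared in the network config for that dataset type.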
def nested_update(source, additions):
"""Merge one dictionary(additions) into another(source)"""
if not isinstance(additions, dict):
return source
for key, value in additions.items():
if isinstance(value, dict):
source[key] = nested_update(source[key], value)
else:
source[key] = value
return source
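# Worked example (illustrative):
# nested_update({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
# -> {"a": {"b": 1, "c": 2}, "d": 3}
# Nested dicts are merged key by key; non-dict values in `additions` overwrite.
# Note: dict-valued keys in `additions` are expected to already exist in `source`.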
def is_job_automl(user_id, model_id, job_id):
"""Returns if the job is automl-based job or not"""
try:
root = resolve_root(user_id, "model", model_id)
jobdir = os.path.join(root, job_id)
automl_signature = os.path.join(jobdir, "controller.log")
return os.path.exists(automl_signature)
except:
return False
def is_request_automl(user_id, handler_id, parent_job_id, actions, kind):
"""Returns if the job requested is automl based train or not"""
handler_metadata = resolve_metadata(user_id, kind, handler_id)
if handler_metadata.get("automl_enabled", False) and actions == ["train"]:
return True
return False
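# Example (illustrative): a request with actions == ["train"] on a model whose
# metadata has automl_enabled == True is treated as an AutoML request and routed
# to AutoMLHandler.start() in job_run() below instead of the regular job workflow.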
# validate workspace
def validate_workspace():
"""Checks if the workspace directory is in the current structure or not"""
root = stateless_handlers.get_root()
# populate pre-existing models
user_dirs = os.listdir(root)
for user_id in user_dirs:
user_dir = os.path.join(root, user_id)
# sanity check #1
if not os.path.isdir(user_dir): # not a directory
print("Random files exist!! Wrong workspace structure, must only have user IDs", file=sys.stderr)
continue
# sanity check #2
try:
uuid.UUID(user_id)
except: # not a valid user id
print("Directory not corresponding to a UUID", file=sys.stderr)
continue
# get pre-existing datasets
user_datasets_path = os.path.join(user_dir, "datasets")
dir_contents = []
if os.path.isdir(user_datasets_path):
dir_contents = os.listdir(user_datasets_path)
# NOTE: Assumes pre-existing datasets have data already uploaded.
for content in dir_contents:
metadata_path = user_datasets_path + "/" + content + "/metadata.json"
if os.path.exists(metadata_path):
with open(metadata_path, "r", encoding='utf-8') as f:
meta_data = json.load(f)
dataset_id = meta_data.get("id", None)
if dataset_id is None:
continue
print(f"Found dataset: {dataset_id}", file=sys.stderr)
# Assert we have logs, specs, jobs_metadata folders. If not create all those...
stateless_handlers.make_root_dirs(user_id, "datasets", dataset_id)
# get pre-existing models
user_models_path = os.path.join(user_dir, "models")
dir_contents = []
if os.path.isdir(user_models_path):
dir_contents = os.listdir(user_models_path)
for content in dir_contents:
metadata_path = user_models_path + "/" + content + "/metadata.json"
if os.path.exists(metadata_path):
with open(metadata_path, "r", encoding='utf-8') as f:
meta_data = json.load(f)
model_id = meta_data.get("id", None)
if model_id is None:
continue
print(f"Found model: {model_id}", file=sys.stderr)
# Assert we have logs, specs, jobs_metadata folders. If not create all those...
stateless_handlers.make_root_dirs(user_id, "models", model_id)
def load_metadata_json(json_file):
"""Loads the json file provided"""
with open(json_file, "r", encoding='utf-8') as f:
metadata = json.load(f)
return metadata
class AppHandler:
"""
App Handler class
- Static class
"""
# Dataset API
@staticmethod
def list_datasets(user_id):
"""
user_id: str, uuid
Returns:
list(dict) - list of datasets accessible by user where each element is metadata of a dataset
"""
# Collect all metadatas
metadatas = []
for dataset_id in list(set(get_user_datasets(user_id) + stateless_handlers.get_public_datasets())):
metadatas.append(stateless_handlers.get_handler_metadata(dataset_id))
return metadatas
# Create dataset
@staticmethod
def create_dataset(user_id, request_dict):
"""
user_id: str, uuid
request_dict: dict following DatasetReqSchema
- type is required
- format is required
Returns:
- 201 with metadata of created dataset if successful
- 400 if dataset type or format is invalid
"""
# Create a dataset ID and its root
dataset_id = str(uuid.uuid4())
# Gather type,format fields from request
ds_type = request_dict.get("type", None)
ds_format = request_dict.get("format", None)
# Perform basic checks - valid type and format?
if ds_type not in VALID_DSTYPES:
msg = "Invalid dataset type"
return Code(400, {}, msg)
if ds_format not in read_network_config(ds_type)["api_params"]["formats"]:
msg = "Incompatible dataset format and type"
return Code(400, {}, msg)
if request_dict.get("public", False):
stateless_handlers.add_public_dataset(dataset_id)
dataset_actions = get_dataset_actions(ds_type, ds_format)
# Create metadata dict and create some initial folders
metadata = {"id": dataset_id,
"created_on": datetime.datetime.now().isoformat(),
"last_modified": datetime.datetime.now().isoformat(),
"name": request_dict.get("name", "My Dataset"),
"description": request_dict.get("description", "My TAO Dataset"),
"version": request_dict.get("version", "1.0.0"),
"logo": request_dict.get("logo", "https://www.nvidia.com"),
"type": ds_type,
"format": ds_format,
"actions": dataset_actions
}
stateless_handlers.make_root_dirs(user_id, "datasets", dataset_id)
write_handler_metadata(user_id, "dataset", dataset_id, metadata)
# Read this metadata from saved file...
return_metadata = resolve_metadata_with_jobs(user_id, "dataset", dataset_id)
ret_Code = Code(201, return_metadata, "Dataset created")
return ret_Code
# Update existing dataset for user based on request dict
@staticmethod
def update_dataset(user_id, dataset_id, request_dict):
"""
user_id: str, uuid
dataset_id: str, uuid
request_dict: dict following DatasetReqSchema
- type and format cannot be changed
Returns:
Code object
- 200 with metadata of updated dataset if successful
- 404 if dataset not found / user cannot access
- 400 if invalid update
"""
if not resolve_existence(user_id, "dataset", dataset_id):
return Code(404, {}, "Dataset not found")
if not check_write_access(user_id, dataset_id):
return Code(404, {}, "Dataset not available")
if request_dict.get("public", None):
if request_dict["public"]:
stateless_handlers.add_public_dataset(dataset_id)
else:
stateless_handlers.remove_public_dataset(dataset_id)
metadata = resolve_metadata(user_id, "dataset", dataset_id)
for key in request_dict.keys():
# Cannot process the update, so return 400
if key in ["type", "format"]:
if request_dict[key] != metadata.get(key):
msg = f"Cannot change dataset {key}"
return Code(400, {}, msg)
if key in ["name", "description", "version", "logo"]:
requested_value = request_dict[key]
if requested_value is not None:
metadata[key] = requested_value
metadata["last_modified"] = datetime.datetime.now().isoformat()
write_handler_metadata(user_id, "dataset", dataset_id, metadata)
# Read this metadata from saved file...
return_metadata = resolve_metadata_with_jobs(user_id, "dataset", dataset_id)
ret_Code = Code(200, return_metadata, "Dataset updated")
return ret_Code
# Retrieve existing dataset for user based on request dict
@staticmethod
def retrieve_dataset(user_id, dataset_id):
"""
user_id: str, uuid
dataset_id: str, uuid
Returns:
Code object
- 200 with metadata of retrieved dataset if successful
- 404 if dataset not found / user cannot access
"""
if not resolve_existence(user_id, "dataset", dataset_id):
return Code(404, {}, "Dataset not found")
if not check_read_access(user_id, dataset_id):
return Code(404, {}, "Dataset not found")
return_metadata = resolve_metadata_with_jobs(user_id, "dataset", dataset_id)
return Code(200, return_metadata, "Dataset retrieved")
# Delete a user's dataset
@staticmethod
def delete_dataset(user_id, dataset_id):
"""
user_id: str, uuid
dataset_id: str, uuid
Returns:
Code object
- 200 with metadata of deleted dataset if successful
- 404 if dataset not found / user cannot access
- 400 if dataset has running jobs / being used by a model and hence cannot be deleted
"""
if not resolve_existence(user_id, "dataset", dataset_id):
return Code(404, {}, "Dataset not found")
if dataset_id not in get_user_datasets(user_id):
return Code(404, {}, "Dataset cannot be deleted")
# If dataset is being used by user's models.
metadata_file_pattern = stateless_handlers.get_root() + f"{user_id}/models/**/metadata.json"
metadata_files = glob.glob(metadata_file_pattern)
for metadata_file in metadata_files:
metadata = load_metadata_json(metadata_file)
train_datasets = metadata.get("train_datasets", [])
if type(train_datasets) != list:
train_datasets = [train_datasets]
if dataset_id in metadata.get("train_datasets", []) + \
[metadata.get("eval_dataset", None),
metadata.get("inference_dataset", None),
metadata.get("calibration_dataset", None)]:
return Code(400, {}, "Dataset in use")
# Check if any job running
return_metadata = resolve_metadata_with_jobs(user_id, "dataset", dataset_id)
for job in return_metadata["jobs"]:
if job["status"] == "Running":
return Code(400, {}, "Dataset in use")
# Check if dataset is public, then someone could be running it
if return_metadata.get("public", False):
return Code(400, {}, "Dataset is Public. Cannot delete")
# Check if dataset is read only, if yes, cannot delete
if return_metadata.get("read_only", False):
return Code(400, {}, "Dataset is read only. Cannot delete")
# Remove metadata file to signify deletion
os.remove(stateless_handlers.get_handler_metadata_file(dataset_id))
# Remove the whole folder as a Daemon...
deletion_command = f"rm -rf {stateless_handlers.get_handler_root(dataset_id)}"
delete_thread = threading.Thread(target=run_system_command, args=(deletion_command,))
delete_thread.start()
return Code(200, return_metadata, "Dataset deleted")
@staticmethod
def upload_dataset(user_id, dataset_id, file_tgz):
"""
user_id: str, uuid
dataset_id: str, uuid
file_tgz: Flask request.files["file"]
Returns:
Code object
- 201 with {} if successful
- 404 if dataset not found / user cannot access
- 400 if upload validation fails
"""
if not resolve_existence(user_id, "dataset", dataset_id):
return Code(404, {}, "Dataset not found")
if not check_write_access(user_id, dataset_id):
return Code(404, {}, "Dataset not available")
# Save tar file at the dataset root
tar_path = os.path.join(stateless_handlers.get_handler_root(dataset_id), "data.tar.gz")
file_tgz.save(tar_path)
metadata = resolve_metadata(user_id, "dataset", dataset_id)
print("Uploading dataset to server", file=sys.stderr)
return_Code = DS_UPLOAD_TO_FUNCTIONS[metadata.get("type")](tar_path, metadata)
print("Uploading complete", file=sys.stderr)
return return_Code
# Spec API
@staticmethod
def get_spec_schema(user_id, handler_id, action, kind):
"""
user_id: str, uuid
handler_id: str, uuid corresponding to model/dataset
action: str, a valid action for the model/dataset
kind: str, one of ["model","dataset"]
Returns:
Code object
- 200 with spec in a json-schema format
- 404 if model/dataset not found / user cannot access
"""
if not resolve_existence(user_id, kind, handler_id):
return Code(404, {}, "Spec schema not found")
if not check_read_access(user_id, handler_id):
return Code(404, {}, "Spec schema not available")
metadata = resolve_metadata(user_id, kind, handler_id)
# Action not available
if action not in metadata.get("actions", []):
return Code(404, {}, "Action not found")
# Read csv from spec_utils/specs/<network_name>/action.csv
# Convert to json schema
json_schema = {}
DIR_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
network = metadata.get("network_arch", None)
if not network:
# Used for data conversion
network = metadata.get("type", None)
# Try regular format for CSV_PATH => "<network> - <action>.csv"
CSV_PATH = os.path.join(DIR_PATH, "specs_utils", "specs", network, f"{network} - {action}.csv")
if not os.path.exists(CSV_PATH):
# Try secondary format for CSV_PATH => "<network> - <action>__<dataset-format>.csv"
fmt = metadata.get("format", "_")
CSV_PATH = os.path.join(DIR_PATH, "specs_utils", "specs", network, f"{network} - {action}__{fmt}.csv")
if not os.path.exists(CSV_PATH):
Code(404, {}, "Default specs do not exist for action")
inferred_class_names = []
# If class-wise config is applicable
if read_network_config(network)["api_params"]["classwise"] == "True":
# For each train dataset for the model
metadata = resolve_metadata(user_id, kind, handler_id)
for train_ds in metadata.get("train_datasets", []):
# Obtain class list from classes.json
classes_json = os.path.join(stateless_handlers.get_handler_root(train_ds), "classes.json")
if not os.path.exists(classes_json):
continue
with open(classes_json, "r", encoding='utf-8') as f:
inferred_class_names += json.loads(f.read()) # It is a list
inferred_class_names = list(set(inferred_class_names))
json_schema = csv_to_json_schema.convert(CSV_PATH, inferred_class_names)
return Code(200, json_schema, "Schema retrieved")
@staticmethod
def get_spec(user_id, handler_id, action, kind):
"""
user_id: str, uuid
handler_id: str, uuid corresponding to model/dataset
action: str, a valid action for the model/dataset
kind: str, one of ["model","dataset"]
Returns:
Code object
- 200 with spec in a json format
- 404 if model/dataset not found / user cannot access
"""
if not resolve_existence(user_id, kind, handler_id):
return Code(404, {}, "Spec not found")
if not check_read_access(user_id, handler_id):
return Code(404, {}, "Spec not available")
metadata = resolve_metadata(user_id, kind, handler_id)
# Action not available
if action not in metadata.get("actions", []):
return Code(404, {}, "Action not found")
# read spec from action.json
action_spec_path = stateless_handlers.get_handler_spec_root(handler_id) + f"/{action}.json"
if os.path.exists(action_spec_path):
data = {}
with open(action_spec_path, mode='r', encoding='utf-8-sig') as f:
data = json.load(f)
msg = "Spec retrieved"
return Code(200, data, msg)
msg = "Spec not found"
return Code(404, {}, msg)
@staticmethod
def save_spec(user_id, handler_id, action, request_dict, kind):
"""
user_id: str, uuid
handler_id: str, uuid corresponding to model/dataset
action: str, a valid action for the model/dataset
request_dict: specs given by user
kind: str, one of ["model","dataset"]
Returns:
Code object
- 201 with posted spec in a json format
- 400 if invalid update
- 404 if model/dataset not found / user cannot access
"""
if not resolve_existence(user_id, kind, handler_id):
return Code(404, {}, "Not found")
if not check_write_access(user_id, handler_id):
return Code(404, {}, "Not available")
metadata = resolve_metadata(user_id, kind, handler_id)
# Action not available
if action not in metadata.get("actions", []):
return Code(404, {}, "Action not found")
# save the request_dict inside action.json
action_spec_path = stateless_handlers.get_handler_spec_root(handler_id) + f"/{action}.json"
metadata["last_modified"] = datetime.datetime.now().isoformat()
write_handler_metadata(user_id, kind, handler_id, metadata)
# Get specs schema
schema_ret = AppHandler.get_spec_schema(user_id, handler_id, action, kind)
if schema_ret.code != 200:
msg = "Schema not found"
return Code(404, {}, msg)
# Harden and validate specs
# try:
# print(request_dict, "\n", file=sys.stderr)
# print(schema, "\n", file=sys.stderr)
# hardened_spec = specCheck.harden(request_dict,schema)
# print(hardened_spec, "\n", file=sys.stderr)
# failed_spec = specCheck.validate(hardened_spec,schema)
# print(failed_spec, "\n", file=sys.stderr)
# if failed_spec:
# return Code(404,{},failed_spec)
# else:
# request_dict = hardened_spec.copy()
# except Exception as e:
# return Code(404,{},str(e))
with open(action_spec_path, "w", encoding='utf-8') as f:
request_json_string = json.dumps(request_dict, indent=4)
f.write(request_json_string)
msg = "Spec saved"
return Code(201, request_dict, msg)
@staticmethod
def update_spec(user_id, handler_id, action, request_dict, kind):
"""
user_id: str, uuid
handler_id: str, uuid corresponding to model/dataset
action: str, a valid action for the model/dataset
request_dict: specs given by user
kind: str, one of ["model","dataset"]
Returns:
Code object
- 201 with posted spec in a json format
- 400 if invalid update
- 404 if model/dataset not found / user cannot access
"""
if not resolve_existence(user_id, kind, handler_id):
return Code(404, {}, "Not found")
if not check_write_access(user_id, handler_id):
return Code(404, {}, "Not available")
metadata = resolve_metadata(user_id, kind, handler_id)
# Action not available
if action not in metadata.get("actions", []):
return Code(404, {}, "Action not found")
# read spec from action.json
action_spec_path = stateless_handlers.get_handler_spec_root(handler_id) + f"/{action}.json"
if os.path.exists(action_spec_path):
data = {}
with open(action_spec_path, mode='r', encoding='utf-8-sig') as f:
data = json.load(f)
print("Data", data, file=sys.stderr)
try:
nested_update(data, request_dict)
print("Data", data, file=sys.stderr)
return_code = AppHandler.save_spec(user_id, handler_id, action, data, kind)
if return_code.code == 201:
msg = "Spec retrieved"
return Code(200, data, msg)
msg = "Specs save failed"
return Code(400, {}, msg)
except:
msg = "Specs cannot be updated, check the request"
return Code(400, {}, msg)
# if it doesn't exist, error
else:
msg = "Spec not found"
return Code(404, {}, msg)
# Job API
@staticmethod
def job_run(user_id, handler_id, parent_job_id, actions, kind):
"""
user_id: str, uuid
handler_id: str, uuid corresponding to model/dataset
parent_job_id: str, uuid
actions: list(str), each action corresponds to a valid action
kind: str, one of ["model","dataset"]
Returns:
201 with list(str) where each str is a uuid for job if jobs successfully queued
404 with [] if dataset/model/parent_job_id/actions not found
"""
if not resolve_existence(user_id, kind, handler_id):
return Code(404, [], f"{kind} not found")
if not check_write_access(user_id, handler_id):
return Code(404, [], f"{kind} not found")
if parent_job_id:
if not resolve_job_existence(user_id, kind, handler_id, parent_job_id):
return Code(404, [], "job not found")
if len(actions) == 0:
return Code(404, [], "action not found")
handler_metadata = resolve_metadata(user_id, kind, handler_id)
if "ptm" in handler_metadata.keys():
ptm_ids = handler_metadata["ptm"]
for ptm_id in ptm_ids:
if ptm_id:
if stateless_handlers.get_handler_metadata(ptm_id).get("ngc_path", None):
download_ptm(ptm_id)
# job_run_thread = threading.Thread(target=download_ptm,args=(ptm_id,))
# job_run_thread.start()
if is_request_automl(user_id, handler_id, parent_job_id, actions, kind):
return AutoMLHandler.start(user_id, handler_id, handler_metadata)
try:
job_ids = [str(uuid.uuid4()) for i in actions]
job_contexts = create_job_contexts(parent_job_id, actions, job_ids, handler_id)
on_new_job(job_contexts)
return Code(201, job_ids, "Jobs scheduled")
except:
return Code(404, [], "action not found")
@staticmethod
def job_list(user_id, handler_id, kind):
"""
user_id: str, uuid
handler_id: str, uuid corresponding to model/dataset
kind: str, one of ["model","dataset"]
Returns:
200, list(dict) - each dict follows JobResultSchema if found
404, [] if not found
"""
if not resolve_existence(user_id, kind, handler_id):
return Code(404, [], f"{kind} not found")
if not check_read_access(user_id, handler_id):
return Code(404, [], f"{kind} not found")
return_metadata = resolve_metadata_with_jobs(user_id, kind, handler_id).get("jobs", [])
return Code(200, return_metadata, "Jobs retrieved")
@staticmethod
def job_cancel(user_id, handler_id, job_id, kind):
"""
user_id: str, uuid
handler_id: str, uuid corresponding to model/dataset
job_id: str, uuid corresponding to job to be cancelled
kind: str, one of ["model","dataset"]
Returns:
200, [job_id] - if job can be cancelled
404, [] if not found
"""
if not resolve_existence(user_id, kind, handler_id):
return Code(404, [], f"{kind} not found")
if not check_write_access(user_id, handler_id):
return Code(404, [], f"{kind} not found")
if not resolve_job_existence(user_id, kind, handler_id, job_id):
return Code(404, [], "job not found")
if is_job_automl(user_id, handler_id, job_id):
return AutoMLHandler.stop(user_id, handler_id, job_id)
# If job is error / done, then cancel is NoOp
job_metadata = stateless_handlers.get_handler_job_metadata(handler_id, job_id)
job_status = job_metadata.get("status", "Error")
if job_status in ["Error", "Done"]:
return Code(404, [], "incomplete job not found")
if job_status == "Pending":
on_delete_job(handler_id, job_id)
stateless_handlers.update_job_status(handler_id, job_id, status="Error")
return Code(200, [job_id], "job cancelled")
if job_status == "Running":
try:
# Delete K8s job
jobDriver.delete(job_id)
stateless_handlers.update_job_status(handler_id, job_id, status="Error")
return Code(200, [job_id], "job cancelled")
except:
return Code(404, [], "job not found in platform")
else:
return Code(404, [], "job status not found")
@staticmethod
def job_retrieve(user_id, handler_id, job_id, kind):
"""
user_id: str, uuid
handler_id: str, uuid corresponding to model/dataset
job_id: str, uuid corresponding to job to be retrieved
kind: str, one of ["model","dataset"]
Returns:
200, dict following JobResultSchema - if job found
404, {} if not found
"""
if not resolve_existence(user_id, kind, handler_id):
return Code(404, {}, "Dataset not found")
if not check_read_access(user_id, handler_id):
return Code(404, {}, "Dataset not found")
if not resolve_job_existence(user_id, kind, handler_id, job_id):
return Code(404, {}, "Job not found")
if is_job_automl(user_id, handler_id, job_id):
return AutoMLHandler.retrieve(user_id, handler_id, job_id)
path = os.path.join(stateless_handlers.get_root(), user_id, kind + "s", handler_id, "jobs_metadata", job_id + ".json")
job_meta = stateless_handlers.load_json_data(path)
return Code(200, job_meta, "Job retrieved")
# Delete job
@staticmethod
def job_delete(user_id, handler_id, job_id, kind):
"""
user_id: str, uuid
handler_id: str, uuid corresponding to model/dataset
job_id: str, uuid corresponding to job to be deleted
kind: str, one of ["model","dataset"]
Returns:
200, [job_id] - if job can be deleted
404, [] if not found
"""
if not resolve_existence(user_id, kind, handler_id):
return Code(404, [], f"{kind} not found")
if not check_write_access(user_id, handler_id):
return Code(404, [], f"{kind} not found")
if not resolve_job_existence(user_id, kind, handler_id, job_id):
return Code(404, [], "job not found")
try:
# If job is running, cannot delete
job_metadata = stateless_handlers.get_handler_job_metadata(handler_id, job_id)
if job_metadata.get("status", "Error") in ["Running", "Pending"]:
return Code(400, [], "job cannot be deleted")
# Delete job metadata
job_metadata_path = os.path.join(stateless_handlers.get_handler_jobs_metadata_root(handler_id), job_id + ".json")
if os.path.exists(job_metadata_path):
os.remove(job_metadata_path)
# Delete job logs
job_log_path = os.path.join(stateless_handlers.get_handler_log_root(handler_id), job_id + ".txt")
if os.path.exists(job_log_path):
os.remove(job_log_path)
# Delete the job directory in the background
deletion_command = "rm -rf " + os.path.join(stateless_handlers.get_handler_root(handler_id), job_id)
targz_path = os.path.join(stateless_handlers.get_handler_root(handler_id), job_id + ".tar.gz")
if os.path.exists(targz_path):
deletion_command += "; rm -rf " + targz_path
delete_thread = threading.Thread(target=run_system_command, args=(deletion_command,))
delete_thread.start()
return Code(200, [job_id], "job deleted")
except:
return Code(400, [], "job cannot be deleted")
# Download model job
@staticmethod
def job_download(user_id, handler_id, job_id, kind):
"""
user_id: str, uuid
handler_id: str, uuid corresponding to model/dataset
job_id: str, uuid corresponding to job to be downloaded
kind: str, one of ["model","dataset"]
Returns:
200, Path to a tar.gz created from the job directory
404, None if not found
"""
if not resolve_existence(user_id, kind, handler_id):
return Code(404, None, f"{kind} not found")
if not check_read_access(user_id, handler_id):
return Code(404, None, f"{kind} not found")
if not resolve_job_existence(user_id, kind, handler_id, job_id):
return Code(404, None, "job not found")
try:
root = stateless_handlers.get_handler_root(handler_id)
# Copy job logs from root/logs/<job_id>.txt to root/<job_id>/logs_from_toolkit.txt
job_log_path = stateless_handlers.get_handler_log_root(handler_id) + f"/{job_id}.txt"
if os.path.exists(job_log_path):
shutil.copy(job_log_path, stateless_handlers.get_handler_root(handler_id) + f"/{job_id}/logs_from_toolkit.txt")
out_tar = os.path.join(root, job_id + ".tar.gz")
command = f"cd {root} ; tar -zcvf {job_id}.tar.gz {job_id} ; cd -"
run_system_command(command)
if os.path.exists(out_tar):
return Code(200, out_tar, "job deleted")
return Code(404, None, "job output not found")
except:
return Code(404, None, "job output not found")
# Model API
@staticmethod
def list_models(user_id):
"""
user_id: str, uuid
Returns:
list(dict) - list of models accessible by user where each element is metadata of a model
"""
# Collect all metadatas
metadatas = []
for model_id in list(set(get_user_models(user_id) + stateless_handlers.get_public_models())):
metadatas.append(stateless_handlers.get_handler_metadata(model_id))
return metadatas
@staticmethod
def create_model(user_id, request_dict):
"""
user_id: str, uuid
request_dict: dict following ModelReqSchema
- network_arch is required
- encryption_key is required (not enforced)
Returns:
- 201 with metadata of created model if successful
- 400 if network_arch is invalid
"""
# Create a model ID and its root
model_id = str(uuid.uuid4())
# Gather type,format fields from request
mdl_nw = request_dict.get("network_arch", None)
# Perform basic checks - valid type and format?
if mdl_nw not in VALID_NETWORKS:
msg = "Invalid network arch"
return Code(400, {}, msg)
if request_dict.get("public", False):
stateless_handlers.add_public_model(model_id)
# Create metadata dict and create some initial folders
# Initially make datasets, ptm None
metadata = {"id": model_id,
"created_on": datetime.datetime.now().isoformat(),
"last_modified": datetime.datetime.now().isoformat(),
"name": request_dict.get("name", "My Model"),
"description": request_dict.get("description", "My TAO Model"),
"version": request_dict.get("version", "1.0.0"),
"logo": request_dict.get("logo", "https://www.nvidia.com"),
"ngc_path": request_dict.get("ngc_path", ""),
"encryption_key": request_dict.get("encryption_key", "tlt_encode"),
"read_only": request_dict.get("read_only", False),
"public": request_dict.get("public", False),
"network_arch": mdl_nw,
"dataset_type": read_network_config(mdl_nw)["api_params"]["dataset_type"],
"actions": read_network_config(mdl_nw)["api_params"]["actions"],
"train_datasets": [],
"eval_dataset": None,
"inference_dataset": None,
"additional_id_info": None,
"calibration_dataset": None,
"ptm": [],
"automl_enabled": False,
"automl_algorithm": None,
"metric": None,
"automl_add_hyperparameters": "",
"automl_remove_hyperparameters": ""
}
if request_dict.get("automl_enabled", False):
if mdl_nw not in AUTOML_DISABLED_NETWORKS:
metadata["automl_enabled"] = True
metadata["automl_algorithm"] = request_dict.get("automl_algorithm", "Bayesian")
metadata["metric"] = request_dict.get("metric", "map")
metadata["automl_add_hyperparameters"] = request_dict.get("automl_add_hyperparameters", "")
metadata["automl_remove_hyperparameters"] = request_dict.get("automl_remove_hyperparameters", "")
# AutoML optional params
if request_dict.get("automl_max_recommendations"):
metadata["automl_max_recommendations"] = request_dict.get("automl_max_recommendations")
if request_dict.get("automl_delete_intermediate_ckpt"):
metadata["automl_delete_intermediate_ckpt"] = request_dict.get("automl_delete_intermediate_ckpt")
if request_dict.get("automl_R"):
metadata["automl_R"] = request_dict.get("automl_R")
if request_dict.get("automl_nu"):
metadata["automl_nu"] = request_dict.get("automl_nu")
if request_dict.get("epoch_multiplier"):
metadata["epoch_multiplier"] = request_dict.get("epoch_multiplier")
else:
return Code(400, {}, "automl_enabled cannot be True for unsupported network")
# Update datasets and ptms if given
for key in ["train_datasets", "eval_dataset", "inference_dataset", "calibration_dataset", "ptm"]:
if key not in request_dict.keys():
continue
value = request_dict[key]
if stateless_handlers.model_update_handler_attributes(user_id, metadata, key, value):
metadata[key] = value
else:
return Code(400, {}, f"Provided {key} cannot be added")
# Actual "creation" happens here...
stateless_handlers.make_root_dirs(user_id, "models", model_id)
write_handler_metadata(user_id, "model", model_id, metadata)
# Read this metadata from saved file...
return_metadata = resolve_metadata_with_jobs(user_id, "model", model_id)
ret_Code = Code(201, return_metadata, "Model created")
return ret_Code
# Update existing model for user based on request dict
@staticmethod
def update_model(user_id, model_id, request_dict):
"""
user_id: str, uuid
model_id: str, uuid
request_dict: dict following ModelReqSchema
Returns:
- 200 with metadata of updated model if successful
- 404 if model not found / user cannot access
- 400 if invalid update / model is read only
"""
if not resolve_existence(user_id, "model", model_id):
return Code(400, {}, "Does not exist")
if not check_write_access(user_id, model_id):
return Code(400, {}, "Does not exist")
# if public is set to True => add it to public_models, if it is set to False => take it down
# if public is not there, do nothing
if request_dict.get("public", None):
if request_dict["public"]:
stateless_handlers.add_public_model(model_id)
else:
stateless_handlers.remove_public_model(model_id)
metadata = resolve_metadata(user_id, "model", model_id)
for key in request_dict.keys():
# Cannot process the update, so return 400
if key in ["network_arch"]:
if request_dict[key] != metadata.get(key):
msg = f"Cannot change model {key}"
return Code(400, {}, msg)
if key in ["name", "description", "version", "logo",
"ngc_path", "encryption_key", "read_only", "public"]:
requested_value = request_dict[key]
if requested_value is not None:
metadata[key] = requested_value
metadata["last_modified"] = datetime.datetime.now().isoformat()
if key in ["train_datasets", "eval_dataset", "inference_dataset", "calibration_dataset", "ptm"]:
value = request_dict[key]
if stateless_handlers.model_update_handler_attributes(user_id, metadata, key, value):
metadata[key] = value
else:
return Code(400, {}, f"Provided {key} cannot be added")
if key in ["automl_enabled"]:
value = request_dict[key]
# If False, can set. If True, need to check if AutoML is supported
if value:
mdl_nw = metadata.get("network_arch", "")
if mdl_nw not in AUTOML_DISABLED_NETWORKS:
metadata[key] = True
metadata["automl_algorithm"] = request_dict.get("automl_algorithm", "Bayesian")
metadata["metric"] = request_dict.get("metric", "map")
metadata["automl_add_hyperparameters"] = request_dict.get("automl_add_hyperparameters", "")
metadata["automl_remove_hyperparameters"] = request_dict.get("automl_remove_hyperparameters", "")
# AutoML optional params
if request_dict.get("automl_max_recommendations"):
metadata["automl_max_recommendations"] = request_dict.get("automl_max_recommendations")
if request_dict.get("automl_delete_intermediate_ckpt"):
metadata["automl_delete_intermediate_ckpt"] = request_dict.get("automl_delete_intermediate_ckpt")
if request_dict.get("automl_R"):
metadata["automl_R"] = request_dict.get("automl_R")
if request_dict.get("automl_nu"):
metadata["automl_nu"] = request_dict.get("automl_nu")
if request_dict.get("epoch_multiplier"):
metadata["epoch_multiplier"] = request_dict.get("epoch_multiplier")
else:
return Code(400, {}, "automl_enabled cannot be True for unsupported network")
else:
metadata[key] = value
write_handler_metadata(user_id, "model", model_id, metadata)
# Read this metadata from saved file...
return_metadata = resolve_metadata_with_jobs(user_id, "model", model_id)
ret_Code = Code(200, return_metadata, "Model updated")
return ret_Code
@staticmethod
def retrieve_model(user_id, model_id):
"""
user_id: str, uuid
model_id: str, uuid
Returns:
- 200 with metadata of retrieved model if successful
- 404 if model not found / user cannot access
"""
if not resolve_existence(user_id, "model", model_id):
return Code(404, {}, "Model not found")
if not check_read_access(user_id, model_id):
return Code(404, {}, "Model not found")
return_metadata = resolve_metadata_with_jobs(user_id, "model", model_id)
return Code(200, return_metadata, "Model retrieved")
@staticmethod
def delete_model(user_id, model_id):
"""
user_id: str, uuid
model_id: str, uuid
Returns:
Code object
- 200 with metadata of deleted model if successful
- 404 if model not found / user cannot access
- 400 if model has running jobs / being used and hence cannot be deleted
"""
if not resolve_existence(user_id, "model", model_id):
return Code(404, {}, "Model not found")
if model_id not in get_user_models(user_id):
return Code(404, {}, "Model cannot be deleted")
# If model is being used as a ptm by the user's other models.
metadata_file_pattern = stateless_handlers.get_root() + f"{user_id}/models/**/metadata.json"
metadata_files = glob.glob(metadata_file_pattern)
for metadata_file in metadata_files:
metadata = load_metadata_json(metadata_file)
if model_id == metadata.get("ptm", None):
return Code(400, {}, "Model in use as a ptm")
# Check if any job running
return_metadata = resolve_metadata_with_jobs(user_id, "model", model_id)
for job in return_metadata["jobs"]:
if job["status"] == "Running":
return Code(400, {}, "Model in use")
# Check if model is public, then someone could be running it
if return_metadata.get("public", False):
return Code(400, {}, "Model is Public. Cannot delete")
# Check if model is read only, if yes, cannot delete
if return_metadata.get("read_only", False):
return Code(400, {}, "Model is read only. Cannot delete")
# Remove metadata file to signify deletion
os.remove(stateless_handlers.get_handler_metadata_file(model_id))
# Remove the whole folder as a Daemon...
deletion_command = f"rm -rf {stateless_handlers.get_handler_root(model_id)}"
delete_thread = threading.Thread(target=run_system_command, args=(deletion_command,))
delete_thread.start()
return Code(200, return_metadata, "Model deleted")
@staticmethod
def resume_model_job(user_id, model_id, job_id, kind):
"""
user_id: str, uuid
model_id: str, uuid corresponding to model
job_id: str, uuid corresponding to a train job
Returns:
200 with [job_id] if job resumed and added to queue
400 with [] if job_id does not correspond to a train action or if it cannot be resumed
404 with [] if model/job_id not found
"""
if not resolve_existence(user_id, "model", model_id):
return Code(404, [], "Model not found")
if not check_write_access(user_id, model_id):
return Code(404, [], "Model not found")
action = infer_action_from_job(model_id, job_id)
if action != "train":
return Code(400, [], "Action not train")
handler_metadata = resolve_metadata(user_id, kind, model_id)
if is_job_automl(user_id, model_id, job_id):
return AutoMLHandler.resume(user_id, model_id, job_id, handler_metadata)
try:
# Create a job and run it
job_contexts = create_job_contexts(None, ["train"], [job_id], model_id)
on_new_job(job_contexts)
return Code(200, [job_id], "Action resumed")
except:
return Code(400, [], "Action cannot be resumed")
| tao_front_end_services-main | api/handlers/app_handler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a dictionary mapping docker image to an internal tag"""
import os
DOCKER_IMAGE_MAPPER = {
"tlt-tf1": os.getenv('IMAGE_TF1', default='nvcr.io/nvidia/tao/tao-toolkit:5.0.0-tf1.15.5'),
"tlt-pytorch": os.getenv('IMAGE_PYT', default='nvcr.io/nvidia/tao/tao-toolkit:5.0.0-pyt'),
"tlt-tf2": os.getenv('IMAGE_TF2', default='nvcr.io/nvidia/tao/tao-toolkit:5.0.0-tf2.11.0'),
"tao-deploy": os.getenv('IMAGE_TAO_DEPLOY', default='nvcr.io/nvidia/tao/tao-toolkit:5.0.0-deploy'),
"": os.getenv('IMAGE_DEFAULT', default='nvcr.io/nvidia/tao/tao-toolkit:5.0.0-tf1.15.5'), # Default
"api": os.getenv('IMAGE_API', default='nvcr.io/nvidia/tao/tao-toolkit:5.0.0-api'),
"tao-ds": os.getenv('IMAGE_DATA_SERVICES', default='nvcr.io/nvidia/tao/tao-toolkit:5.0.0-data-services')
}
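# Example (illustrative): DOCKER_IMAGE_MAPPER["tlt-pytorch"] resolves to the
# IMAGE_PYT environment variable if it was set when this module was imported,
# otherwise to the default nvcr.io/nvidia/tao/tao-toolkit:5.0.0-pyt tag pinned above.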
| tao_front_end_services-main | api/handlers/docker_images.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to infer data sources"""
import os
import glob
import json
import sys
from handlers.stateless_handlers import get_handler_root, get_handler_job_metadata
def detectnet_v2(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Detecnet v2"""
# Creates data sources based on what is available
# If eval is not given, train could fail; this is because, by definition, we
# want to use all "train" data for learning a model
# Init
if "dataset_config" not in list(config.keys()):
config["dataset_config"] = {}
# Training datasets
if handler_metadata.get("train_datasets", []) != []:
config["dataset_config"]["data_sources"] = []
for train_ds in handler_metadata.get("train_datasets", []):
ds_source_dict = {}
ds_source_dict["tfrecords_path"] = get_handler_root(train_ds) + "/tfrecords/*"
ds_source_dict["image_directory_path"] = get_handler_root(train_ds) + "/"
config["dataset_config"]["data_sources"].append(ds_source_dict)
if config["dataset_config"].get("validation_fold") is not None:
del config["dataset_config"]["validation_fold"]
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
config["dataset_config"]["validation_data_source"] = {}
config["dataset_config"]["validation_data_source"]["tfrecords_path"] = get_handler_root(eval_ds) + "/tfrecords/*"
config["dataset_config"]["validation_data_source"]["image_directory_path"] = get_handler_root(eval_ds) + "/"
return config
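# Illustrative result of detectnet_v2() for one train and one eval dataset
# (hypothetical dataset roots):
# config["dataset_config"]["data_sources"] = [
#     {"tfrecords_path": "<train_root>/tfrecords/*",
#      "image_directory_path": "<train_root>/"}]
# config["dataset_config"]["validation_data_source"] = {
#     "tfrecords_path": "<eval_root>/tfrecords/*",
#     "image_directory_path": "<eval_root>/"}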
def unet(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Unet"""
# Init
if "dataset_config" not in list(config.keys()):
config["dataset_config"] = {}
# Training datasets
if handler_metadata.get("train_datasets", []) != []:
for train_ds in handler_metadata.get("train_datasets", []):
config["dataset_config"]["train_masks_path"] = get_handler_root(train_ds) + "/masks/train"
config["dataset_config"]["train_images_path"] = get_handler_root(train_ds) + "/images/train"
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
config["dataset_config"]["val_masks_path"] = get_handler_root(eval_ds) + "/masks/val"
config["dataset_config"]["val_images_path"] = get_handler_root(eval_ds) + "/images/val"
# Infer dataset
infer_ds = handler_metadata.get("inference_dataset", None)
if infer_ds is not None:
config["dataset_config"]["test_images_path"] = get_handler_root(infer_ds) + "/images/test"
return config
def segformer(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Segformer"""
# Init
if "dataset" not in list(config.keys()):
config["dataset"] = {}
# Training datasets
if handler_metadata.get("train_datasets", []) != []:
for train_ds in handler_metadata.get("train_datasets", []):
if "train_dataset" not in config["dataset"].keys():
config["dataset"]["train_dataset"] = {}
if config["dataset"]["train_dataset"].get("ann_dir", None):
config["dataset"]["train_dataset"]["ann_dir"].append(get_handler_root(train_ds) + "/masks/train")
config["dataset"]["train_dataset"]["img_dir"].append(get_handler_root(train_ds) + "/images/train")
else:
config["dataset"]["train_dataset"]["ann_dir"] = [get_handler_root(train_ds) + "/masks/train"]
config["dataset"]["train_dataset"]["img_dir"] = [get_handler_root(train_ds) + "/images/train"]
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
if job_context.action == "train":
eval_key = "val_dataset"
else:
eval_key = "test_dataset"
config["dataset"][eval_key] = {}
config["dataset"][eval_key]["ann_dir"] = get_handler_root(eval_ds) + "/masks/val"
config["dataset"][eval_key]["img_dir"] = get_handler_root(eval_ds) + "/images/val"
return config
faster_rcnn = detectnet_v2
def yolo_v4(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Yolo_v4"""
# Identical to detectnet_v2: validation_data_sources instead of validation_data_source
# Init
if "dataset_config" not in list(config.keys()):
config["dataset_config"] = {}
# Training datasets
if handler_metadata.get("train_datasets", []) != []:
config["dataset_config"]["data_sources"] = []
for train_ds in handler_metadata.get("train_datasets", []):
ds_source_dict = {}
ds_source_dict["tfrecords_path"] = get_handler_root(train_ds) + "/tfrecords/*"
ds_source_dict["image_directory_path"] = get_handler_root(train_ds) + "/"
config["dataset_config"]["data_sources"].append(ds_source_dict)
if config["dataset_config"].get("validation_fold") is not None:
del config["dataset_config"]["validation_fold"]
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
config["dataset_config"]["validation_data_sources"] = {}
config["dataset_config"]["validation_data_sources"]["tfrecords_path"] = get_handler_root(eval_ds) + "/tfrecords/*"
config["dataset_config"]["validation_data_sources"]["image_directory_path"] = get_handler_root(eval_ds) + "/"
return config
yolo_v3 = yolo_v4
yolo_v4_tiny = yolo_v4
def ssd(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for SSD"""
# Identical to yolo_v4: tfrecords_path ends with -* as opposed to *
# Init
if "dataset_config" not in list(config.keys()):
config["dataset_config"] = {}
# Training datasets
if handler_metadata.get("train_datasets", []) != []:
config["dataset_config"]["data_sources"] = []
for train_ds in handler_metadata.get("train_datasets", []):
ds_source_dict = {}
ds_source_dict["tfrecords_path"] = get_handler_root(train_ds) + "/tfrecords/tfrecords-*"
# ds_source_dict["image_directory_path"] = get_handler_root(train_ds)+"/"
config["dataset_config"]["data_sources"].append(ds_source_dict)
if config["dataset_config"].get("validation_fold") is not None:
del config["dataset_config"]["validation_fold"]
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
config["dataset_config"]["validation_data_sources"] = {}
config["dataset_config"]["validation_data_sources"]["label_directory_path"] = get_handler_root(eval_ds) + "/labels"
config["dataset_config"]["validation_data_sources"]["image_directory_path"] = get_handler_root(eval_ds) + "/images"
return config
retinanet = ssd
dssd = ssd
def object_detection(config, job_context, handler_metadata):
"""Returns config directly as no changes are required"""
return config
def lprnet(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for LPRNet"""
# Assumes every train dataset and the eval dataset all share the same characters.txt
# Init
if "dataset_config" not in list(config.keys()):
config["dataset_config"] = {}
# Training datasets
if handler_metadata.get("train_datasets", []) != []:
config["dataset_config"]["data_sources"] = []
for train_ds in handler_metadata.get("train_datasets", []):
ds_source_dict = {}
ds_source_dict["label_directory_path"] = get_handler_root(train_ds) + "/label"
ds_source_dict["image_directory_path"] = get_handler_root(train_ds) + "/image"
config["dataset_config"]["data_sources"].append(ds_source_dict)
config["dataset_config"]["characters_list_file"] = get_handler_root(train_ds) + "/characters.txt"
if config["dataset_config"].get("validation_fold") is not None:
del config["dataset_config"]["validation_fold"]
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
config["dataset_config"]["validation_data_sources"] = {}
config["dataset_config"]["validation_data_sources"]["label_directory_path"] = get_handler_root(eval_ds) + "/label"
config["dataset_config"]["validation_data_sources"]["image_directory_path"] = get_handler_root(eval_ds) + "/image"
config["dataset_config"]["characters_list_file"] = get_handler_root(eval_ds) + "/characters.txt"
return config
def efficientdet_tf1(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for EfficientDetTf1"""
# Init
if "dataset_config" not in list(config.keys()):
config["dataset_config"] = {}
# Training datasets
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
print("Warning: EfficientDet supports only one train dataset", file=sys.stderr)
config["dataset_config"]["training_file_pattern"] = get_handler_root(train_ds) + "/tfrecords/*.tfrecord"
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
config["dataset_config"]["validation_file_pattern"] = get_handler_root(eval_ds) + "/tfrecords/*.tfrecord"
config["dataset_config"]["validation_json_file"] = get_handler_root(eval_ds) + "/annotations.json"
return config
def efficientdet_tf2(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for EfficientDet tf2"""
# Init
if "data" not in list(config.keys()):
config["dataset"] = {}
# Training datasets
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
print("Warning: EfficientDet supports only one train dataset", file=sys.stderr)
handler_root = get_handler_root(train_ds)
parent_dir = os.path.dirname(glob.glob(handler_root + "/**/*.tfrecord", recursive=True)[0])
config["dataset"]["train_tfrecords"] = [parent_dir + "/*.tfrecord"]
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
handler_root = get_handler_root(eval_ds)
parent_dir = os.path.dirname(glob.glob(handler_root + "/**/*.tfrecord", recursive=True)[0])
config["dataset"]["val_tfrecords"] = [parent_dir + "/*.tfrecord"]
config["dataset"]["val_json_file"] = handler_root + "/annotations.json"
return config
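# Example (illustrative): if the eval dataset root contains
# <eval_root>/tfrecords/val-00000.tfrecord, the recursive glob above resolves
# parent_dir to <eval_root>/tfrecords, so val_tfrecords becomes
# ["<eval_root>/tfrecords/*.tfrecord"] and val_json_file points to
# <eval_root>/annotations.json.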
def mask_rcnn(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Mask RCNN"""
# Init
if "data_config" not in list(config.keys()):
config["data_config"] = {}
# Training datasets
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
print("Warning: MaskRCNN supports only one train dataset", file=sys.stderr)
config["data_config"]["training_file_pattern"] = get_handler_root(train_ds) + "/tfrecords/*.tfrecord"
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
config["data_config"]["validation_file_pattern"] = get_handler_root(eval_ds) + "/tfrecords/*.tfrecord"
config["data_config"]["val_json_file"] = get_handler_root(eval_ds) + "/annotations.json"
return config
def multitask_classification(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Multi-task classification"""
# Init
if "dataset_config" not in list(config.keys()):
config["dataset_config"] = {}
parent_action = get_handler_job_metadata(job_context.handler_id, job_context.parent_id).get("action")
if job_context.action in ("train", "retrain"):
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
root = get_handler_root(train_ds)
print("Warning: Multitask Classification supports only one train dataset", file=sys.stderr)
config["dataset_config"]["train_csv_path"] = root + "/train.csv"
config["dataset_config"]["val_csv_path"] = root + "/val.csv"
config["dataset_config"]["image_directory_path"] = root + "/images_train"
elif job_context.action == "evaluate" or (job_context.action == "inference" and parent_action in ("gen_trt_engine", "trtexec")):
if handler_metadata.get("eval_dataset", None) is not None:
eval_ds = handler_metadata.get("eval_dataset", None)
root = get_handler_root(eval_ds)
config["dataset_config"]["val_csv_path"] = root + "/val.csv"
config["dataset_config"]["image_directory_path"] = root + "/images_val"
return config
def classification_tf1(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Classification-tf1"""
if "train_config" not in list(config.keys()):
config["train_config"] = {}
if "eval_config" not in list(config.keys()):
config["eval_config"] = {}
print("Warning: Classification supports only one train dataset", file=sys.stderr)
print("Warning: Train, eval datasets are both required to run Classification actions - train, evaluate, retrain, inference", file=sys.stderr)
train_datasets = handler_metadata.get("train_datasets", [])
if train_datasets != []:
config["train_config"]["train_dataset_path"] = get_handler_root(train_datasets[0]) + "/images_train"
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
if os.path.exists(get_handler_root(eval_ds) + "/images_val"):
config["train_config"]["val_dataset_path"] = get_handler_root(eval_ds) + "/images_val"
config["eval_config"]["eval_dataset_path"] = get_handler_root(eval_ds) + "/images_val"
else:
print("Warning: eval_ds+/images_val does not exist", file=sys.stderr)
infer_ds = handler_metadata.get("inference_dataset", None)
if infer_ds is not None:
if os.path.exists(get_handler_root(infer_ds) + "/images_test"):
config["eval_config"]["eval_dataset_path"] = get_handler_root(infer_ds) + "/images_test"
else:
print("Warning: infer_ds+/images_test does not exist", file=sys.stderr)
return config
def classification_tf2(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Classification-tf2"""
if "dataset" not in list(config.keys()):
config["dataset"] = {}
if "evaluate" not in list(config.keys()):
config["evaluate"] = {}
print("Warning: Classification-tf2 supports only one train dataset", file=sys.stderr)
print("Warning: Train, eval datasets are both required to run Classification actions - train, evaluate, retrain, inference", file=sys.stderr)
train_datasets = handler_metadata.get("train_datasets", [])
if train_datasets != []:
config["dataset"]["train_dataset_path"] = get_handler_root(train_datasets[0]) + "/images_train"
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
if os.path.exists(get_handler_root(eval_ds) + "/images_val"):
config["dataset"]["val_dataset_path"] = get_handler_root(eval_ds) + "/images_val"
else:
print("Warning: eval_ds+/images_val does not exist", file=sys.stderr)
infer_ds = handler_metadata.get("inference_dataset", None)
if infer_ds is not None:
if os.path.exists(get_handler_root(infer_ds) + "/images_test"):
config["evaluate"]["dataset_path"] = get_handler_root(infer_ds) + "/images_test"
else:
print("Warning: infer_ds+/images_test does not exist", file=sys.stderr)
return config
def classification_pyt(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Classification-pyt"""
if "val" not in list(config["dataset"]["data"].keys()):
config["dataset"]["data"]["val"] = {}
if "test" not in list(config["dataset"]["data"].keys()):
config["dataset"]["data"]["test"] = {}
print("Warning: Classification-pyt supports only one train dataset", file=sys.stderr)
print("Warning: Train, eval datasets are both required to run Classification actions - train, evaluate, inference", file=sys.stderr)
train_datasets = handler_metadata.get("train_datasets", [])
if train_datasets != []:
config["dataset"]["data"]["train"]["data_prefix"] = get_handler_root(train_datasets[0]) + "/images_train"
config["dataset"]["data"]["train"]["classes"] = get_handler_root(train_datasets[0]) + "/classes.txt"
eval_ds = handler_metadata.get("eval_dataset", None)
if eval_ds is not None:
if os.path.exists(get_handler_root(eval_ds) + "/images_val"):
config["dataset"]["data"]["val"]["data_prefix"] = get_handler_root(eval_ds) + "/images_val"
config["dataset"]["data"]["val"]["classes"] = get_handler_root(eval_ds) + "/classes.txt"
else:
print("Warning: eval_ds+/images_val does not exist", file=sys.stderr)
infer_ds = handler_metadata.get("inference_dataset", None)
if infer_ds is not None:
if os.path.exists(get_handler_root(infer_ds) + "/images_test"):
config["dataset"]["data"]["test"]["data_prefix"] = get_handler_root(infer_ds) + "/images_test"
config["dataset"]["data"]["test"]["classes"] = get_handler_root(infer_ds) + "/classes.txt"
else:
print("Warning: infer_ds+/images_test does not exist", file=sys.stderr)
return config
def bpnet(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for BPNET"""
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
root = get_handler_root(train_ds)
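# Rewrite the dataset's coco_spec.json in place so its root_directory_path points at this dataset's handler root before training/retraining.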
with open(root + "/coco_spec.json", "r", encoding='utf-8') as coco_spec_file:
coco_spec_json = json.load(coco_spec_file)
coco_spec_json["root_directory_path"] = root + "/"
with open(root + "/coco_spec.json", "w", encoding='utf-8') as coco_spec_file:
json.dump(coco_spec_json, coco_spec_file)
if job_context.action in ("train", "retrain"):
config["dataloader"]["pose_config"]["pose_config_path"] = root + "/bpnet_18joints.json"
config["dataloader"]["dataset_config"]["root_data_path"] = root + "/"
config["dataloader"]["dataset_config"]["train_records_folder_path"] = root + "/"
config["dataloader"]["dataset_config"]["val_records_folder_path"] = root + "/"
config["dataloader"]["dataset_config"]["dataset_specs"]["coco"] = root + "/coco_spec.json"
config["inference_spec"] = root + "/infer_spec.yaml"
else:
train_ds = handler_metadata.get("id")
root = get_handler_root(train_ds)
config["root_directory_path"] = root + "/"
return config
def fpenet(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for FPENET"""
afw_suffix = ""
if "num_keypoints" in config.keys():
if config["num_keypoints"] == 10:
afw_suffix = "_10"
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
root = get_handler_root(train_ds)
config["dataloader"]["dataset_info"]["tfrecords_directory_path"] = root + "/data/tfrecords/"
if job_context.action != "export":
config["dataloader"]["dataset_info"]["tfrecords_set_id_train"] = f"afw{afw_suffix}"
config["dataloader"]["dataset_info"]["tfrecords_set_id_val"] = f"afw{afw_suffix}"
config["dataloader"]["kpiset_info"]["tfrecords_set_id_kpi"] = f"afw{afw_suffix}"
if config["num_keypoints"] == 10:
config["dataloader"]["augmentation_info"]["modulus_spatial_augmentation"]["hflip_probability"] = 0.0
if job_context.action == "inference":
with open(root + "/data.json", "r", encoding='utf-8') as inference_file:
inference_json = json.load(inference_file)
modified_inference_json = []
for img_info in inference_json:
img_info["filename"] = os.path.join(root, "data", "afw", os.path.basename(img_info["filename"]))
modified_inference_json.append(img_info)
with open(root + "/data.json", "w", encoding='utf-8') as inference_file:
json.dump(modified_inference_json, inference_file)
else:
train_ds = handler_metadata.get("id")
root = get_handler_root(train_ds)
config["sets"] = [f"afw{afw_suffix}"]
config["gt_root_path"] = root + "/"
config["save_root_path"] = root + "/"
config["image_root_path"] = root + "/"
return config
def action_recogntion_dynamic_config(config, action):
"""Dynamically drop out spec parameters based on certain other parameters"""
model_type = config["model"]["model_type"] # rgb/of/joint
input_type = config["model"]["input_type"] # 3d/2d
if model_type == "rgb":
config["model"].pop("of_seq_length", None)
if action == "train":
config["model"].pop("of_pretrained_num_classes", None)
config["dataset"]["augmentation_config"].pop("of_input_mean", None)
config["dataset"]["augmentation_config"].pop("of_input_std", None)
config["model"].pop("of_pretrained_model_path", None)
elif model_type == "of":
config["model"].pop("rgb_seq_length", None)
if action == "train":
config["model"].pop("rgb_pretrained_num_classes", None)
config["dataset"]["augmentation_config"].pop("rgb_input_mean", None)
config["dataset"]["augmentation_config"].pop("rgb_input_std", None)
config["model"].pop("rgb_pretrained_model_path", None)
elif model_type == "joint":
if "rgb_pretrained_model_path" in config["model"].keys():
ptm_paths = config["model"]["rgb_pretrained_model_path"].split(",")
rgb_pretrained_model_path = ptm_paths[0] if "_rgb_" in ptm_paths[0] else ptm_paths[1]
of_pretrained_model_path = ptm_paths[0] if "_of_" in ptm_paths[0] else ptm_paths[1]
config["model"]["rgb_pretrained_model_path"] = rgb_pretrained_model_path
config["model"]["of_pretrained_model_path"] = of_pretrained_model_path
config["dataset"]["label_map"] = config["dataset"]["label_map_" + input_type]
config["dataset"].pop("label_map_2d", None)
config["dataset"].pop("label_map_3d", None)
return config
def action_recognition(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params and,
makes changes to config based on model and input_type for Action recognition
"""
config = action_recogntion_dynamic_config(config, job_context.action)
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
root = get_handler_root(train_ds)
if job_context.action == "train":
config["dataset"]["train_dataset_dir"] = os.path.join(root, "train")
config["dataset"]["val_dataset_dir"] = os.path.join(root, "test")
elif job_context.action == "evaluate":
config["evaluate"]["test_dataset_dir"] = os.path.join(root, "test")
elif job_context.action == "inference":
config["inference"]["inference_dataset_dir"] = os.path.join(root, "test/smile")
return config
def pointpillars(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Pointpillars"""
train_ds = handler_metadata.get("train_datasets", [])
if train_ds != []:
train_ds = train_ds[0]
else:
train_ds = handler_metadata.get("id")
root = get_handler_root(train_ds)
config["dataset"]["data_path"] = root
return config
def pose_classification_dynamic_config(config, action):
"""Dynamically drop out spec parameters based on certain other parameters"""
model_type = config["model"]["graph_layout"] # openpose/nvidia
if model_type == "nvidia":
if action == "train":
config["dataset"].pop("random_choose", None)
config["dataset"].pop("random_move", None)
config["dataset"].pop("window_size", None)
config["dataset"]["label_map"] = config["dataset"]["label_map_nvidia"]
elif model_type == "openpose":
if action == "train":
config["model"].pop("pretrained_model_path", None)
config["dataset"]["label_map"] = config["dataset"]["label_map_kinetics"]
config["dataset"].pop("label_map_kinetics", None)
config["dataset"].pop("label_map_nvidia", None)
return config
def pose_classification(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params and,
makes changes to config based on model type for Pose classification
"""
model_type = config["model"]["graph_layout"] # openpose/nvidia
if model_type == "openpose":
model_type = "kinetics"
pose_classification_dynamic_config(config, job_context.action)
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
root = get_handler_root(train_ds)
if job_context.action == "train":
config["dataset"]["train_dataset"] = {}
config["dataset"]["val_dataset"] = {}
config["dataset"]["train_dataset"]["data_path"] = os.path.join(root, model_type, "train_data.npy")
config["dataset"]["train_dataset"]["label_path"] = os.path.join(root, model_type, "train_label.pkl")
config["dataset"]["val_dataset"]["data_path"] = os.path.join(root, model_type, "val_data.npy")
config["dataset"]["val_dataset"]["label_path"] = os.path.join(root, model_type, "val_label.pkl")
elif job_context.action in ("evaluate", "inference"):
config[job_context.action]["test_dataset"] = {}
config[job_context.action]["test_dataset"]["data_path"] = os.path.join(root, model_type, "val_data.npy")
if job_context.action == "evalute":
config[job_context.action]["test_dataset"]["label_path"] = os.path.join(root, model_type, "val_label.pkl")
return config
def re_identification(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Re-identification"""
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
root = get_handler_root(train_ds)
if job_context.action == "train":
config["dataset"]["train_dataset_dir"] = os.path.join(root, "sample_train")
config["dataset"]["test_dataset_dir"] = os.path.join(root, "sample_test")
config["dataset"]["query_dataset_dir"] = os.path.join(root, "sample_query")
elif job_context.action in ("evaluate", "inference"):
config[job_context.action]["test_dataset"] = os.path.join(root, "sample_test")
config[job_context.action]["query_dataset"] = os.path.join(root, "sample_query")
return config
def deformable_detr(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Deformable-Detr"""
# Train dataset
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
train_root = get_handler_root(train_ds)
if job_context.action == "train":
config["dataset"]["train_data_sources"] = [{}]
config["dataset"]["train_data_sources"][0]["image_dir"] = os.path.join(train_root, "images")
config["dataset"]["train_data_sources"][0]["json_file"] = os.path.join(train_root, "annotations.json")
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
eval_root = get_handler_root(eval_ds)
if eval_ds is not None:
if job_context.action == "train":
config["dataset"]["val_data_sources"] = [{}]
config["dataset"]["val_data_sources"][0]["image_dir"] = os.path.join(eval_root, "images")
config["dataset"]["val_data_sources"][0]["json_file"] = os.path.join(eval_root, "annotations.json")
if job_context.action == "evaluate":
config["dataset"]["test_data_sources"] = {}
config["dataset"]["test_data_sources"]["image_dir"] = os.path.join(eval_root, "images")
config["dataset"]["test_data_sources"]["json_file"] = os.path.join(eval_root, "annotations.json")
# Inference dataset
infer_ds = handler_metadata.get("inference_dataset", None)
infer_root = get_handler_root(infer_ds)
if infer_ds is not None:
if job_context.action == "inference":
config["dataset"]["infer_data_sources"] = {}
config["dataset"]["infer_data_sources"]["image_dir"] = [os.path.join(infer_root, "images")]
config["dataset"]["infer_data_sources"]["classmap"] = os.path.join(infer_root, "label_map.txt")
return config
def mal(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for MAL"""
# Train dataset
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
train_root = get_handler_root(train_ds)
if job_context.action in ("evaluate", "inference", "train"):
if "dataset" not in config.keys():
config["dataset"] = {}
config["dataset"]["train_img_dir"] = os.path.join(train_root, "images")
config["dataset"]["train_ann_path"] = os.path.join(train_root, "annotations.json")
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
eval_root = get_handler_root(eval_ds)
if eval_ds is not None:
if job_context.action in ("evaluate", "inference", "train"):
if "dataset" not in config.keys():
config["dataset"] = {}
config["dataset"]["val_img_dir"] = os.path.join(eval_root, "images")
config["dataset"]["val_ann_path"] = os.path.join(eval_root, "annotations.json")
# Inference dataset
infer_ds = handler_metadata.get("inference_dataset", None)
infer_root = get_handler_root(infer_ds)
if infer_ds is not None:
if job_context.action == "inference":
if "inference" not in config.keys():
config["inference"] = {}
config["inference"]["img_dir"] = os.path.join(infer_root, "images")
config["inference"]["ann_path"] = os.path.join(infer_root, "annotations.json")
return config
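# DINO shares Deformable-DETR's data-source layout, and instance segmentation reuses the generic object_detection mapping, so both simply alias the existing functions.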
dino = deformable_detr
instance_segmentation = object_detection
def ml_recog(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for Metric Learning Recognition"""
# Train dataset
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
train_root = get_handler_root(train_ds)
if "dataset" not in config.keys():
config["dataset"] = {}
if job_context.action == "train":
config["dataset"]["train_dataset"] = os.path.join(train_root, "metric_learning_recognition", "retail-product-checkout-dataset_classification_demo", "known_classes", "train")
config["dataset"]["val_dataset"] = {
"reference": os.path.join(train_root, "metric_learning_recognition", "retail-product-checkout-dataset_classification_demo", "known_classes", "reference"),
"query": os.path.join(train_root, "metric_learning_recognition", "retail-product-checkout-dataset_classification_demo", "known_classes", "val")}
if job_context.action == "evaluate":
config["dataset"]["val_dataset"] = {
"reference": os.path.join(train_root, "metric_learning_recognition", "retail-product-checkout-dataset_classification_demo", "unknown_classes", "reference"),
"query": os.path.join(train_root, "metric_learning_recognition", "retail-product-checkout-dataset_classification_demo", "unknown_classes", "test")}
if job_context.action == "inference":
if "inference" not in config.keys():
config["inference"] = {}
config["dataset"]["val_dataset"] = {
"reference": os.path.join(train_root, "metric_learning_recognition", "retail-product-checkout-dataset_classification_demo", "unknown_classes", "reference"),
"query": ""}
config["inference"]["input_path"] = os.path.join(train_root, "metric_learning_recognition", "retail-product-checkout-dataset_classification_demo", "unknown_classes", "test")
return config
def ocdnet(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for OCDNET"""
parent_action = get_handler_job_metadata(job_context.handler_id, job_context.parent_id).get("action")
if parent_action == "retrain":
config["model"]["load_pruned_graph"] = True
# Train dataset
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
train_root = get_handler_root(train_ds)
if "dataset" not in config.keys():
config["dataset"] = {}
config["dataset"]["train_dataset"] = {}
config["dataset"]["validate_dataset"] = {}
if job_context.action in ("train", "retrain"):
config["dataset"]["train_dataset"]["data_path"] = [os.path.join(train_root, "train")]
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
eval_root = get_handler_root(eval_ds)
if eval_ds is not None:
if "dataset" not in config.keys():
config["dataset"] = {}
config["dataset"]["train_dataset"] = {}
config["dataset"]["validate_dataset"] = {}
config["dataset"]["validate_dataset"]["data_path"] = [os.path.join(eval_root, "test")]
if job_context.action == "inference":
if "inference" not in config.keys():
config["inference"] = {}
config["inference"]["input_folder"] = os.path.join(eval_root, "test/img")
return config
def ocrnet(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for OCRNET"""
if job_context.action == "dataset_convert":
ds = handler_metadata.get("id")
root = get_handler_root(ds)
if "dataset_convert" not in config.keys():
config["dataset_convert"] = {}
sub_folder = "train"
if "test" in os.listdir(root):
sub_folder = "test"
config["dataset_convert"]["input_img_dir"] = f"{root}/{sub_folder}"
config["dataset_convert"]["gt_file"] = f"{root}/{sub_folder}/gt_new.txt"
config["dataset_convert"]["results_dir"] = f"{root}/{sub_folder}/lmdb"
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
eval_root = get_handler_root(eval_ds)
# Train dataset
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
train_root = get_handler_root(train_ds)
if "dataset" not in config.keys():
config["dataset"] = {}
if job_context.action in ("train", "retrain"):
config["dataset"]["train_dataset_dir"] = [os.path.join(train_root, "train/lmdb")]
config["dataset"]["val_dataset_dir"] = os.path.join(eval_root, "test/lmdb")
config["dataset"]["character_list_file"] = os.path.join(eval_root, "character_list")
if eval_ds is not None:
if job_context.action == "evaluate":
if "evaluate" not in config.keys():
config["evaluate"] = {}
config["evaluate"]["test_dataset_dir"] = os.path.join(eval_root, "test/lmdb")
if job_context.action == "inference":
if "inference" not in config.keys():
config["inference"] = {}
config["inference"]["inference_dataset_dir"] = os.path.join(eval_root, "test")
return config
def optical_inspection(config, job_context, handler_metadata):
"""Assigns paths of data sources to the respective config params for OCRNET"""
# Train dataset
if handler_metadata.get("train_datasets", []) != []:
train_ds = handler_metadata.get("train_datasets", [])[0]
train_root = get_handler_root(train_ds)
if "dataset" not in config.keys():
config["dataset"] = {}
if "train_dataset" not in config["dataset"].keys():
config["dataset"]["train_dataset"] = {}
config["dataset"]["train_dataset"]["images_dir"] = os.path.join(train_root, "images")
config["dataset"]["train_dataset"]["csv_path"] = os.path.join(train_root, "dataset.csv")
# Eval dataset
eval_ds = handler_metadata.get("eval_dataset", None)
eval_root = get_handler_root(eval_ds)
if eval_ds is not None:
if "dataset" not in config.keys():
config["dataset"] = {}
if "validation_dataset" not in config["dataset"].keys():
config["dataset"]["validation_dataset"] = {}
if "test_dataset" not in config["dataset"].keys():
config["dataset"]["test_dataset"] = {}
config["dataset"]["validation_dataset"]["images_dir"] = os.path.join(eval_root, "images")
config["dataset"]["validation_dataset"]["csv_path"] = os.path.join(eval_root, "dataset.csv")
config["dataset"]["test_dataset"]["images_dir"] = os.path.join(eval_root, "images")
config["dataset"]["test_dataset"]["csv_path"] = os.path.join(eval_root, "dataset.csv")
# Inference dataset
infer_ds = handler_metadata.get("inference_dataset", None)
infer_root = get_handler_root(infer_ds)
if infer_ds is not None:
if "dataset" not in config.keys():
config["dataset"] = {}
if "infer_dataset" not in config["dataset"].keys():
config["dataset"]["infer_dataset"] = {}
config["dataset"]["infer_dataset"]["images_dir"] = os.path.join(infer_root, "images")
config["dataset"]["infer_dataset"]["csv_path"] = os.path.join(infer_root, "dataset.csv")
return config
def analytics(config, job_context, handler_metadata):
"""Function to create data sources for analytics module"""
config["data"]["image_dir"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "images")
if config["data"]["input_format"] == "COCO":
config["data"]["ann_path"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "annotations.json")
elif config["data"]["input_format"] == "KITTI":
config["data"]["ann_path"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "labels")
return config
def annotations(config, job_context, handler_metadata):
"""Function to create data sources for annotations module"""
if config["data"]["input_format"] == "COCO":
config["coco"]["ann_file"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "annotations.json")
elif config["data"]["input_format"] == "KITTI":
config["kitti"]["image_dir"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "images")
config["kitti"]["label_dir"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "labels")
return config
def augmentation(config, job_context, handler_metadata):
"""Function to create data sources for augmentation module"""
config["data"]["image_dir"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "images")
if config["data"]["dataset_type"] == "kitti":
config["data"]["ann_path"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "labels")
elif config["data"]["dataset_type"] == "coco":
config["data"]["ann_path"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "annotations.json")
return config
def auto_label(config, job_context, handler_metadata):
"""Function to create data sources for auto_label module"""
config["inference"]["img_dir"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "images")
config["inference"]["ann_path"] = os.path.join(get_handler_root(handler_metadata["inference_dataset"]), "annotations.json")
return config
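# Dispatch table mapping each network / data-service module name to the function above that injects its data-source paths into the spec; the action pipelines look up this table when generating configs.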
DS_CONFIG_TO_FUNCTIONS = {"detectnet_v2": detectnet_v2,
"faster_rcnn": faster_rcnn,
"yolo_v4": yolo_v4,
"yolo_v4_tiny": yolo_v4_tiny,
"yolo_v3": yolo_v3,
"ssd": ssd,
"dssd": dssd,
"retinanet": retinanet,
"unet": unet,
"segformer": segformer,
"lprnet": lprnet,
"efficientdet_tf1": efficientdet_tf1,
"efficientdet_tf2": efficientdet_tf2,
"mask_rcnn": mask_rcnn,
"multitask_classification": multitask_classification,
"classification_pyt": classification_pyt,
"classification_tf1": classification_tf1,
"classification_tf2": classification_tf2,
"bpnet": bpnet,
"fpenet": fpenet,
"action_recognition": action_recognition,
"mal": mal,
"ml_recog": ml_recog,
"ocdnet": ocdnet,
"ocrnet": ocrnet,
"optical_inspection": optical_inspection,
"pointpillars": pointpillars,
"pose_classification": pose_classification,
"re_identification": re_identification,
"deformable_detr": deformable_detr,
"dino": dino,
"object_detection": object_detection,
"instance_segmentation": instance_segmentation,
"analytics": analytics,
"annotations": annotations,
"augmentation": augmentation,
"auto_label": auto_label}
| tao_front_end_services-main | api/handlers/infer_data_sources.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline construction for all model actions"""
import os
import json
import threading
import time
import uuid
import copy
import sys
import traceback
import yaml
from job_utils import executor as jobDriver
from handlers.docker_images import DOCKER_IMAGE_MAPPER
from handlers.infer_params import CLI_CONFIG_TO_FUNCTIONS
from handlers.infer_data_sources import DS_CONFIG_TO_FUNCTIONS
from handlers.stateless_handlers import get_handler_root, get_handler_spec_root, get_handler_log_root, get_handler_job_metadata, get_handler_metadata, update_job_results, update_job_status, get_toolkit_status, load_json_data
from handlers.utilities import StatusParser, build_cli_command, write_nested_dict, read_nested_dict, read_network_config, load_json_spec, search_for_ptm, process_classwise_config, validate_gpu_param_value, NO_SPEC_ACTIONS_MODEL, _OD_NETWORKS, _TF1_NETWORKS, AUTOML_DISABLED_NETWORKS
from automl.utils import delete_lingering_checkpoints, wait_for_job_completion
from specs_utils import json_to_kitti, json_to_yaml
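# Map each spec backend to the converter that serializes the assembled JSON spec into the format the network expects (KITTI/protobuf text or YAML).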
SPEC_BACKEND_TO_FUNCTIONS = {"protobuf": json_to_kitti.kitti, "yaml": json_to_yaml.yml}
class ActionPipeline:
"""
ActionPipeline - Train, Evaluate, Retrain, Prune, Export, Gen_trt_engine (Model),
TBD: DatasetConvert (for OD networks), Augment
To spawn a job by handling all dependencies, monitor and close a job end-to-end
- Inputs:
- JobContext: To communicate with the Model / Dataset handler
- Requires Handler & AppHandler to run()
- Processes spec requirements
- Prepares specs (generate_specs step)
- dataset config (defined for each network's train, evaluate, retrain)
- ptm config (for train, evaluate)
- parent model and load graph (for retrain)
- CLI parameters (for all actions)
- Classwise configs (for all applicable train, evaluate, retrain) => currently for OD networks only
- Converts json to spec backend
- Prepare command (generate_run_command)
- Generate run command
- Infers image from config.json and platform information (if applicable) and sends it to K8s
- Interacts with status.json parser (ETA: TBD) and communicates with Handlers through JobContext
- Supports delete job
- Supports resume for train
- Exposed functions:
- run():
- delete():
- resume(): Same as run()
- Internal functions:
- parse_status():
- generate_config(): Assumes <action>.json exists
- generate_run_command():
- Helper functions():
- __init__():
- _read_api_params()
"""
def __init__(self, job_context):
"""Initialize the ActionPipeline class"""
# Job Context - bridge between Action and Handler
self.job_context = job_context
# Get some handler related data
self.job_id = self.job_context.id
self.network = self.job_context.network
self.network_config = read_network_config(self.network)
self.api_params = self._read_api_params()
self.image = DOCKER_IMAGE_MAPPER[self.api_params.get("image", "")]
self.handler_metadata = get_handler_metadata(self.job_context.handler_id)
self.handler_spec_root = get_handler_spec_root(self.job_context.handler_id)
self.handler_root = get_handler_root(self.job_context.handler_id)
self.handler_log_root = get_handler_log_root(self.job_context.handler_id)
self.handler_id = self.job_context.handler_id
self.tao_deploy_actions = False
self.action_suffix = ""
self.parent_job_action = get_handler_job_metadata(self.handler_id, self.job_context.parent_id).get("action")
if self.job_context.action in ("gen_trt_engine", "trtexec") or (self.parent_job_action in ("gen_trt_engine", "trtexec") and self.network != "bpnet"):
self.tao_deploy_actions = True
if self.job_context.action in ("evaluate", "inference") and self.job_context.network in _TF1_NETWORKS:
self.action_suffix = "_tao_deploy"
# This will be run inside a thread
self.thread = None
# Parameters to launch a job and monitor status
self.job_name = str(self.job_context.id)
self.spec = {}
self.config = {}
self.platform = None
self.run_command = ""
self.status_file = None
def _read_api_params(self):
"""Read network config json file and return api_params key"""
return self.network_config.get("api_params", {})
def generate_config(self):
"""Generate config for this action; Actions may override"""
return {}, {}
def generate_run_command(self):
"""Generate run command for this action; Actions may override"""
return "", None, None
def post_run(self):
"""Run & modify internal variables after toolkit job is done; Actions may override"""
return
def run(self):
"""Calls necessary setup functions; calls job creation; monitors and update status of the job"""
# Set up
self.thread = threading.current_thread()
try:
# Generate config
self.spec, self.config = self.generate_config()
# Generate run command
self.run_command, self.status_file, outdir = self.generate_run_command()
# Pipe logs into logfile: <output_dir>/logs_from_toolkit.txt
if not outdir:
outdir = CLI_CONFIG_TO_FUNCTIONS["output_dir"](self.job_context, self.handler_metadata)
logfile = os.path.join(self.handler_log_root, str(self.job_context.id) + ".txt")
# Pipe stdout and stderr to logfile
self.run_command += f" > {logfile} 2>&1 >> {logfile}"
# After command runs, make sure subdirs permission allows anyone to enter and delete
self.run_command += f"; find {outdir} -type d | xargs chmod 777"
# After command runs, make sure artifact files permission allows anyone to delete
self.run_command += f"; find {outdir} -type f | xargs chmod 666"
# Optionally, pipe self.run_command into a log file
print(self.run_command, self.status_file, file=sys.stderr)
# Set up StatusParser
status_parser = StatusParser(str(self.status_file), self.job_context.network, outdir)
# Get image
# If current or parent action is gen_trt_engine or trtexec, then it's a tao-deploy container action
if self.tao_deploy_actions:
self.image = DOCKER_IMAGE_MAPPER["tao-deploy"]
# Default image for dataset convert for OD networks is tlt-tf1, so override that
elif self.job_context.action == "convert_efficientdet_tf2":
self.image = DOCKER_IMAGE_MAPPER["tlt-tf2"]
print(self.image, file=sys.stderr)
# Convert self.spec to a backend and post it into a <self.handler_spec_root><job_id>.txt file
if self.spec:
if self.api_params["spec_backend"] == "json":
kitti_out = self.spec
kitti_out = json.dumps(kitti_out, indent=4)
elif self.job_context.action == "convert_efficientdet_tf2":
kitti_out = SPEC_BACKEND_TO_FUNCTIONS["yaml"](self.spec)
else:
kitti_out = SPEC_BACKEND_TO_FUNCTIONS[self.api_params["spec_backend"]](self.spec)
# store as kitti
action_spec_path_kitti = CLI_CONFIG_TO_FUNCTIONS["experiment_spec"](self.job_context, self.handler_metadata)
with open(action_spec_path_kitti, "w", encoding='utf-8') as f:
f.write(kitti_out)
# Submit to K8s
# Platform is None, but might be updated in self.generate_config() or self.generate_run_command()
# If platform is indeed None, jobDriver.create would take care of it.
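# Training-style actions (train/retrain/finetune) pass num_gpu=-1 and leave the GPU count to jobDriver.create's default handling; every other action is forced to a single GPU.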
num_gpu = -1
if self.job_context.action not in ['train', 'retrain', 'finetune']:
num_gpu = 1
jobDriver.create(self.job_name, self.image, self.run_command, num_gpu=num_gpu, accelerator=self.platform)
print("Job created", self.job_name, file=sys.stderr)
# Poll every 5 seconds
k8s_status = jobDriver.status(self.job_name)
while k8s_status in ["Done", "Error", "Running", "Pending", "Creating"]:
time.sleep(5)
# If Done, try running self.post_run()
if k8s_status == "Done":
update_job_status(self.handler_id, self.job_id, status="Running")
# Retrieve status one last time!
new_results = status_parser.update_results()
update_job_results(self.handler_id, self.job_id, result=new_results)
try:
print("Post running", file=sys.stderr)
# If post run is done, make it done
self.post_run()
update_job_status(self.handler_id, self.job_id, status="Done")
break
except:
# If post run fails, call it Error
update_job_status(self.handler_id, self.job_id, status="Error")
break
# If running in K8s, update results to job_context
elif k8s_status == "Running":
update_job_status(self.handler_id, self.job_id, status="Running")
# Update results
new_results = status_parser.update_results()
update_job_results(self.handler_id, self.job_id, result=new_results)
# Pending is if we have queueing systems down the road
elif k8s_status == "Pending":
continue
# Creating is if moebius-cloud job is in process of creating batch job
elif k8s_status == "Creating":
# need to get the current status and make sure it's going from Creating to Running
# by now the moebius job manager should have created the batch job
k8s_status = jobDriver.status(self.job_name)
continue
# If the job never submitted or errored out!
else:
update_job_status(self.handler_id, self.job_id, status="Error")
break
k8s_status = jobDriver.status(self.job_name)
toolkit_status = get_toolkit_status(self.handler_id, self.job_id)
print(f"Toolkit status for {self.job_id} is {toolkit_status}", file=sys.stderr)
if toolkit_status != "SUCCESS" and self.job_context.action != "trtexec":
update_job_status(self.handler_id, self.job_id, status="Error")
final_status = get_handler_job_metadata(self.handler_id, self.job_id).get("status", "Error")
print(f"Job Done: {self.job_name} Final status: {final_status}", file=sys.stderr)
with open(logfile, "a", encoding='utf-8') as f:
f.write("\nEOF\n")
return
except Exception:
# Something went wrong inside...
print(traceback.format_exc(), file=sys.stderr)
print(f"Job {self.job_name} did not start", file=sys.stderr)
update_job_status(self.handler_id, self.job_id, status="Error")
update_job_results(self.handler_id, self.job_id, result={"detailed_status": {"message": "Error due to unmet dependencies"}})
return
class CLIPipeline(ActionPipeline):
"""CLIPipeline for actions involve only cli params"""
def __init__(self, job_context):
"""Initialize the CLIPipeline class"""
super().__init__(job_context)
self.network = job_context.network
self.action = job_context.action
# Handle anomalies in network action names
if self.action == "retrain":
self.action = "train"
if self.action == "kmeans":
self.network = "yolo_v3"
if self.action == "augment":
self.network = ""
if self.network == "instance_segmentation" and self.action == "convert":
self.network = "mask_rcnn"
self.action = "dataset_convert"
if self.network == "object_detection" and self.action == "convert":
self.network = "detectnet_v2"
self.action = "dataset_convert"
if self.network == "object_detection" and "efficientdet" in self.action:
self.network = self.action.replace("convert_", "")
self.action = "dataset_convert"
if self.network == "object_detection" and self.action == "convert_and_index":
self.network = "ssd"
self.action = "dataset_convert"
def generate_config(self):
"""Generate config dictionary"""
# Get some variables
action = self.job_context.action
# User stored CLI param in a json file
spec_json_path = os.path.join(self.handler_spec_root, action + ".json")
if os.path.exists(spec_json_path):
config = load_json_spec(spec_json_path)
else:
config = {}
network = self.job_context.network
# Get CLI params from config json
network_config = read_network_config(network)
if action in network_config["cli_params"].keys():
for field_name, inference_fn in network_config["cli_params"][f"{action}{self.action_suffix}"].items():
field_value = CLI_CONFIG_TO_FUNCTIONS[inference_fn](self.job_context, self.handler_metadata)
if field_value:
config[field_name] = field_value
return {}, config
def generate_run_command(self):
"""Generate run command"""
overriden_output_dir = None
if self.action == "dataset_convert":
if self.network not in ("bpnet", "efficientdet_tf2", "fpenet", "ocrnet"):
self.config["results_dir"] = CLI_CONFIG_TO_FUNCTIONS["output_dir"](self.job_context, self.handler_metadata)
if self.network in ("efficientdet_tf1", "mask_rcnn"):
self.config["output_dir"] = os.path.join(self.handler_root, "tfrecords")
self.config["image_dir"] = os.path.join(self.handler_root, self.spec["dataset_convert"]["image_dir"])
self.config["annotations_file"] = os.path.join(self.handler_root, self.spec["dataset_convert"]["annotations_file"])
self.config["num_shards"] = self.spec["dataset_convert"]["num_shards"]
self.config["tag"] = self.spec["dataset_convert"]["tag"]
if self.network == "mask_rcnn":
self.config["include_masks"] = True
elif self.network == "bpnet":
if self.config["mode"] == "train":
self.config["output_filename"] = os.path.join(self.handler_root, "train")
elif self.config["mode"] == "test":
self.config["output_filename"] = os.path.join(self.handler_root, "val")
self.config["generate_masks"] = True
elif self.network == "efficientdet_tf2":
self.config["experiment_spec"] = CLI_CONFIG_TO_FUNCTIONS["experiment_spec"](self.job_context, self.handler_metadata)
elif self.network in _OD_NETWORKS:
self.config["output_filename"] = os.path.join(self.handler_root, "tfrecords/tfrecords")
self.config["verbose"] = True
self.config["dataset_export_spec"] = CLI_CONFIG_TO_FUNCTIONS["experiment_spec"](self.job_context, self.handler_metadata)
if self.action == "inference":
if self.network == "bpnet":
self.config["dump_visualizations"] = True
params_to_cli = build_cli_command(self.config, self.spec)
run_command = f"{self.network} {self.action} {params_to_cli}"
if self.action == "trtexec":
run_command = f"{self.action} {params_to_cli}"
status_file = os.path.join(self.handler_root, self.job_name, "status.json")
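# OCRNet's dataset_convert writes its results (including status.json) under the converted LMDB directory, so point the status file (and the output dir used for permission fixes) there instead of the job directory.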
if self.action == "dataset_convert" and self.network == "ocrnet":
ds = self.handler_metadata.get("id")
root = get_handler_root(ds)
sub_folder = "train"
if "test" in os.listdir(root):
sub_folder = "test"
status_file = f"{root}/{sub_folder}/lmdb/status.json"
overriden_output_dir = os.path.dirname(status_file)
return run_command, status_file, overriden_output_dir
# Specs are modified as well => Train, Evaluate, Retrain Actions
class TrainVal(CLIPipeline):
"""Class for model actions which involves both spec file as well as cli params"""
def generate_config(self):
"""Generates spec and cli params
Returns:
spec: contains the network's spec file parameters
config: contains cli params
"""
network = self.job_context.network
action = self.job_context.action
# Infer CLI params
config = {}
network_config = read_network_config(network)
if action in network_config["cli_params"].keys():
for field_name, inference_fn in network_config["cli_params"][f"{action}{self.action_suffix}"].items():
field_value = CLI_CONFIG_TO_FUNCTIONS[inference_fn](self.job_context, self.handler_metadata)
if field_value:
config[field_name] = field_value
# Read spec from <action>.json for train, resume train, evaluate, retrain. If not there, use train.json
spec_json_path = os.path.join(self.handler_spec_root, action + ".json")
if not os.path.exists(spec_json_path):
if action in NO_SPEC_ACTIONS_MODEL:
spec_json_path = os.path.join(self.handler_spec_root, action + "train.json")
spec = load_json_spec(spec_json_path)
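# When cli_params marks experiment_spec_file as "parent_spec_copied", start from the parent job's spec file and overlay this action's section and the dataset section from the user-provided spec.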
if "experiment_spec_file" in network_config["cli_params"][f"{action}{self.action_suffix}"].keys() and network_config["cli_params"][f"{action}{self.action_suffix}"]["experiment_spec_file"] == "parent_spec_copied":
spec_path = config["experiment_spec_file"]
with open(spec_path, "r", encoding='utf-8') as spec_file:
parent_spec = yaml.safe_load(spec_file)
if action in parent_spec.keys() and action in spec.keys():
parent_spec[action] = spec[action]
if "dataset" in parent_spec.keys() and "dataset" in spec.keys():
parent_spec["dataset"] = spec["dataset"]
spec = parent_spec
# Take .json file, read in spec params, infer spec params
if action in network_config["spec_params"].keys():
for field_name, inference_fn in network_config["spec_params"][action].items():
field_value = CLI_CONFIG_TO_FUNCTIONS[inference_fn](self.job_context, self.handler_metadata)
if field_value:
write_nested_dict(spec, field_name, field_value)
validate_gpu_param_value(spec)
# Move CLI params from spec to config
spec_keys_all = copy.deepcopy(list(spec.keys())) # Since we will be popping the value out, spec would change @ each iteration
for field_name in spec_keys_all:
cnd1 = field_name in network_config["cli_params"][action].keys()
cnd2 = network_config["cli_params"][f"{action}{self.action_suffix}"].get(field_name, None) == "from_csv"
cnd3 = type(spec[field_name]) in [str, float, int, bool]
if cnd1 and cnd2 and cnd3:
config[field_name] = spec.pop(field_name)
print("Loaded specs", file=sys.stderr)
# Infer dataset config
spec = DS_CONFIG_TO_FUNCTIONS[network](spec, self.job_context, self.handler_metadata)
print("Loaded dataset", file=sys.stderr)
# Add classwise config
classwise = self.api_params["classwise"] == "True"
if classwise:
spec = process_classwise_config(spec)
return spec, config
def post_run(self):
"""Carry's out functions after the job is executed"""
# For efficientdet_tf1/tf2, classification_tf2 and ocdnet, copy the pruned model after retrain so that evaluate can access it via the parent relation
action = self.job_context.action
if self.network in ("efficientdet_tf1", "efficientdet_tf2", "classification_tf2", "ocdnet") and action == "retrain":
inference_fn = "parent_model"
pruned_model_path = CLI_CONFIG_TO_FUNCTIONS[inference_fn](self.job_context, self.handler_metadata)
_, file_extension = os.path.splitext(pruned_model_path)
print(f"Copying pruned model {pruned_model_path} after retrain to {self.handler_root}/{self.job_id}/pruned_model{file_extension}\n", file=sys.stderr)
os.system(f"cp {pruned_model_path} {self.handler_root}/{self.job_id}/pruned_model{file_extension}")
class ODConvert(CLIPipeline):
"""Class for Object detection networks which requires tfrecords conversion"""
# def __init__(self,job_context):
# super().__init__(job_context)
def generate_config(self):
"""Modify the spec parameters necessary for object detection convert and return the modified dictionary"""
# Read json
spec_json_path = os.path.join(self.handler_spec_root, f'{self.job_context.action}.json')
spec = load_json_spec(spec_json_path)
config = {}
if self.network == "efficientdet_tf2":
assert self.handler_metadata.get("format") == "coco"
if spec.get("dataset_convert") is None:
spec["dataset_convert"] = {}
spec["dataset_convert"]["image_dir"] = os.path.join(self.handler_root, "images")
spec["dataset_convert"]["annotations_file"] = os.path.join(self.handler_root, "annotations.json")
spec["dataset_convert"]["results_dir"] = CLI_CONFIG_TO_FUNCTIONS["output_dir"](self.job_context, self.handler_metadata)
# We don’t pass in the spec file to dataset convert process
# to efficientdet_tf1/mask-rcnn, hence we need to set the configs here.
# TODO: Have a common theme for all networks
elif self.network not in ("efficientdet_tf1", "mask_rcnn"):
assert self.handler_metadata.get("format") == "kitti"
# Add some parameters to spec
if spec.get("kitti_config") is None:
spec["kitti_config"] = {}
spec["kitti_config"]["image_dir_name"] = "images"
spec["kitti_config"]["label_dir_name"] = "labels"
spec["kitti_config"]["root_directory_path"] = self.handler_root + "/"
spec["image_directory_path"] = self.handler_root + "/"
if spec["kitti_config"].get("kitti_sequence_to_frames_file"):
lname = spec["kitti_config"].get("kitti_sequence_to_frames_file")
fullname = os.path.join(self.handler_root, lname)
if os.path.exists(fullname):
spec["kitti_config"]["kitti_sequence_to_frames_file"] = fullname
return spec, config
def post_run(self):
"""Carry's out functions after the job is executed"""
if self.network not in ("efficientdet_tf1", "mask_rcnn"):
# Get classes information into a file
categorical = get_handler_job_metadata(self.handler_id, self.job_id).get("result").get("categorical", [])
classes = ["car", "person"]
if len(categorical) > 0:
cwv = categorical[0]["category_wise_values"]
classes = [cat_val_dict["category"] for cat_val_dict in cwv]
with open(os.path.join(self.handler_root, "classes.json"), "w", encoding='utf-8') as f:
f.write(json.dumps(classes))
# Remove warning file(s) from tfrecords directory
tfwarning_path = os.path.join(self.handler_root, "tfrecords", "tfrecords_warning.json")
if os.path.exists(tfwarning_path):
os.remove(tfwarning_path)
tfwarning_path_idx = os.path.join(self.handler_root, "tfrecords", "idx-tfrecords_warning.json")
if os.path.exists(tfwarning_path_idx):
os.remove(tfwarning_path_idx)
class UNETDatasetConvert(CLIPipeline):
"""Class for Unet's dataset tfrecords conversion"""
def generate_config(self):
"""Modify the spec parameters necessary for object detection convert and return the modified dictionary"""
spec_json_path = os.path.join(self.handler_spec_root, "convert.json")
if os.path.exists(spec_json_path):
spec = load_json_spec(spec_json_path)
else:
spec = {}
config = {"coco_file": CLI_CONFIG_TO_FUNCTIONS["od_annotations"](self.job_context, self.handler_metadata),
"results_dir": os.path.join(self.handler_root, "masks")}
if spec.get("num_files"):
config["num_files"] = spec.get("num_files")
return spec, config
def generate_run_command(self):
"""Generate run command"""
network = "unet"
action = "dataset_convert"
params_to_cli = build_cli_command(self.config)
run_command = f"{network} {action} {params_to_cli}"
status_file = os.path.join(self.handler_root, self.job_name, "status.json")
return run_command, status_file, None
def post_run(self):
"""Carry's out functions after the job is executed"""
masks_dir = os.path.join(self.handler_root, "masks")
images_dir = os.path.join(self.handler_root, "images")
# write intersection to masks.txt and images.txt
masks_txt = os.path.join(self.handler_root, "masks.txt")
images_txt = os.path.join(self.handler_root, "images.txt")
with open(images_txt, "w", encoding='utf-8') as im_file, open(masks_txt, "w", encoding='utf-8') as ma_file:
available_masks = [m.split(".")[0] for m in os.listdir(masks_dir)]
for image in os.listdir(images_dir):
im_name = image.split(".")[0]
if im_name in available_masks:
im_file.write(os.path.join(images_dir, image + "\n"))
ma_file.write(os.path.join(masks_dir, im_name + ".png\n"))
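# OD dataset augmentation follows the same spec-file + CLI-params flow as training, so it simply reuses the TrainVal pipeline.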
ODAugment = TrainVal
class Dnv2Inference(CLIPipeline):
"""Class for detectnet_v2 specific changes required during inference"""
def generate_config(self):
"""Makes necessaary changes to the spec parameters for detectnet v2 inference"""
network = "detectnet_v2"
action = "inference"
# Infer CLI params
config = {}
network_config = read_network_config(network)
if action in network_config["cli_params"].keys():
for field_name, inference_fn in network_config["cli_params"][f"{action}{self.action_suffix}"].items():
field_value = CLI_CONFIG_TO_FUNCTIONS[inference_fn](self.job_context, self.handler_metadata)
if field_value:
config[field_name] = field_value
# Read spec from <action>.json for train, resume train, evaluate, retrain. If not there, use train.json
spec_json_path = os.path.join(self.handler_spec_root, action + ".json")
spec = load_json_spec(spec_json_path) # Dnv2 NEEDS inference spec
# As per regular TrainVal, do not infer spec params, no need to move spec to cli
# No need to add dataset configs / classwise configs
# Instead do the following: if parent is tlt, enter tlt config and parent is trt, enter trt config
parent_job_id = self.job_context.parent_id
parent_action = get_handler_job_metadata(self.handler_id, parent_job_id).get("action") # This should not fail if dependency passed
if parent_action in ["export", "gen_trt_engine", "trtexec"]:
key = "inferencer_config.tensorrt_config.trt_engine"
else:
key = "inferencer_config.tlt_config.model"
parent_model = CLI_CONFIG_TO_FUNCTIONS["parent_model_evaluate"](self.job_context, self.handler_metadata)
if parent_model:
write_nested_dict(spec, key, parent_model)
# Move CLI params from spec to config
spec_keys_all = copy.deepcopy(list(spec.keys())) # Since we will be popping the value out, spec would change @ each iteration
for field_name in spec_keys_all:
cnd1 = field_name in network_config["cli_params"][action].keys()
cnd2 = network_config["cli_params"][f"{action}{self.action_suffix}"].get(field_name, None) == "from_csv"
cnd3 = type(spec[field_name]) in [str, float, int, bool]
if cnd1 and cnd2 and cnd3:
config[field_name] = spec.pop(field_name)
return spec, config
class AutoMLPipeline(ActionPipeline):
"""AutoML pipeline which carry's out network specific param changes; generating run commands and creating job for individual experiments"""
def __init__(self, job_context):
"""Initialize the AutoMLPipeline class"""
super().__init__(job_context)
self.job_root = self.handler_root + f"/{self.job_context.id}"
self.rec_number = self.get_recommendation_number()
self.expt_root = f"{self.job_root}/experiment_{self.rec_number}"
self.recs_dict = load_json_data(json_file=f"{self.job_root}/controller.json")
self.brain_dict = load_json_data(json_file=f"{self.job_root}/brain.json")
if not os.path.exists(self.expt_root):
os.makedirs(self.expt_root)
def add_ptm_dependency(self, recommended_values):
"""Add PTM as a dependency if backbone or num_layers is part of hyperparameter sweep"""
# See if a ptm is needed (if not searching num_layers / backbone, no PTM), just take default
ptm_id = None
if "backbone" in recommended_values.keys() or "num_layers" in recommended_values.keys():
for dep in self.job_context.dependencies:
if dep.type == "automl_ptm":
ptm_id = dep.name
break
if ptm_id:
recommended_values["ptm"] = search_for_ptm(get_handler_root(ptm_id))
def generate_config(self, recommended_values):
"""Generate config for AutoML experiment"""
spec_json_path = os.path.join(get_handler_spec_root(self.job_context.handler_id), "train.json")
spec = load_json_spec(spec_json_path)
epoch_multiplier = self.brain_dict.get("epoch_multiplier", None)
if epoch_multiplier is not None:
current_ri = int(self.brain_dict.get("ri", {"0": [float('-inf')]})[str(self.brain_dict.get("bracket", 0))][0])
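# When brain.json provides an epoch_multiplier (bracket-based AutoML), spec fields tagged "assign_const_value" are overridden with epoch_multiplier * current_ri, the current bracket's resource allocation; otherwise the constant is read from the spec as-is.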
for param_type in ("automl_spec_params", "automl_cli_params"):
for field_name, inference_fn in self.network_config[param_type].items():
if "automl_" in inference_fn:
field_value = CLI_CONFIG_TO_FUNCTIONS[inference_fn](self.job_context, self.handler_metadata, self.job_root, self.rec_number)
elif "assign_const_value" in inference_fn:
if epoch_multiplier:
field_value = int(epoch_multiplier * current_ri)
if self.network == "mask_rcnn":
field_value = int(field_value * (spec["num_examples_per_epoch"] / spec["train_batch_size"]))
else:
field_value = int(read_nested_dict(spec, field_name))
if "assign_const_value," in inference_fn:
dependent_parameter_names = inference_fn.split(",")
dependent_field_value = int(read_nested_dict(spec, dependent_parameter_names[1]))
if len(dependent_parameter_names) == 2:
field_value = min(field_value, dependent_field_value)
elif len(dependent_parameter_names) == 3:
field_value = int(read_nested_dict(spec, dependent_parameter_names[2]))
if self.network == "segformer" and "logging_interval" in field_name:
field_value -= 1
else:
field_value = CLI_CONFIG_TO_FUNCTIONS[inference_fn](self.job_context, self.handler_metadata)
if field_value:
if param_type == "automl_spec_params":
write_nested_dict(spec, field_name, field_value)
else:
self.config[field_name] = field_value
spec = DS_CONFIG_TO_FUNCTIONS[self.network](spec, self.job_context, self.handler_metadata)
print("Loaded AutoML specs", file=sys.stderr)
for param_name, param_value in recommended_values.items():
write_nested_dict(spec, param_name, param_value)
validate_gpu_param_value(spec)
# Move CLI params from spec to config
spec_keys_all = copy.deepcopy(list(spec.keys())) # Since we will be popping the value out, spec would change @ each iteration
for field_name in spec_keys_all:
cnd1 = field_name in self.network_config["automl_cli_params"].keys()
cnd2 = self.network_config["automl_cli_params"].get(field_name, None) == "from_csv"
cnd3 = type(spec[field_name]) in [str, float, int, bool]
if cnd1 and cnd2 and cnd3:
self.config[field_name] = spec.pop(field_name)
if self.network not in AUTOML_DISABLED_NETWORKS:
spec = process_classwise_config(spec)
# Save specs to a yaml/kitti file
updated_spec_string = SPEC_BACKEND_TO_FUNCTIONS[self.api_params["spec_backend"]](spec)
action_spec_path = os.path.join(self.job_root, f"recommendation_{self.rec_number}.{self.api_params['spec_backend']}")
with open(action_spec_path, "w", encoding='utf-8') as f:
f.write(updated_spec_string)
def generate_run_command(self):
"""Generate the command to be run inside docker for AutoML experiment"""
params_to_cli = build_cli_command(self.config)
run_command = f"{self.network} train {params_to_cli}"
logfile = os.path.join(self.expt_root, "log.txt")
run_command += f" > {logfile} 2>&1 >> {logfile}"
return run_command
def get_recommendation_number(self):
"""Return the current recommendation number"""
rec_number = None
for dep in self.job_context.dependencies:
if dep.type == "automl":
rec_number = int(dep.name)
break
return rec_number
def write_json(self, file_path, json_dict):
"""Write a json file"""
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(json_dict, f,
separators=(',', ':'),
sort_keys=True,
indent=4)
def run(self):
"""Calls necessary setup functions; calls job creation; update status of the job"""
try:
recommended_values = self.recs_dict[self.rec_number].get("specs", {})
self.add_ptm_dependency(recommended_values)
self.generate_config(recommended_values)
run_command = self.generate_run_command()
# Assign a new job id if not assigned already
job_id = self.recs_dict[self.rec_number].get("job_id", None)
if not job_id:
job_id = str(uuid.uuid4())
print("New job id being assigned to recommendation", job_id, file=sys.stderr)
self.recs_dict[self.rec_number]["job_id"] = job_id
self.write_json(file_path=f"{self.job_root}/controller.json", json_dict=self.recs_dict)
print(run_command, file=sys.stderr)
# Wait for existing AutoML jobs to complete
wait_for_job_completion(job_id)
delete_lingering_checkpoints(self.recs_dict[self.rec_number].get("best_epoch_number", ""), self.expt_root)
jobDriver.create(job_id, self.image, run_command, num_gpu=-1)
print(f"AutoML recommendation with experiment id {self.rec_number} and job id {job_id} submitted", file=sys.stderr)
k8s_status = jobDriver.status(job_id)
while k8s_status in ["Done", "Error", "Running", "Pending", "Creating"]:
time.sleep(5)
if os.path.exists(os.path.join(self.expt_root, "log.txt")):
break
if k8s_status == "Error":
print(f"Relaunching job {job_id}", file=sys.stderr)
wait_for_job_completion(job_id)
jobDriver.create(job_id, self.image, run_command, num_gpu=-1)
k8s_status = jobDriver.status(job_id)
return True
except Exception:
print(f"AutoMLpipeline for network {self.network} failed due to exception {traceback.format_exc()}", file=sys.stderr)
job_id = self.recs_dict[self.rec_number].get("job_id", "")
print(job_id, file=sys.stderr)
self.recs_dict[self.rec_number]["status"] = "failure"
self.write_json(file_path=f"{self.job_root}/controller.json", json_dict=self.recs_dict)
update_job_status(self.handler_id, self.job_context.id, status="Error")
jobDriver.delete(self.job_context.id)
return False
# Each Element can be called with a job_context and returns an ActionPipeline (or its derivative) object
ACTIONS_TO_FUNCTIONS = {"train": TrainVal,
"evaluate": TrainVal,
"prune": CLIPipeline,
"prune_tf2": TrainVal,
"prune_with_spec": TrainVal,
"retrain": TrainVal,
"export": CLIPipeline,
"export_with_spec": TrainVal,
"export_tf2": TrainVal,
"inference": TrainVal,
"dnv2inference": Dnv2Inference,
"gen_trt_engine": TrainVal,
"trtexec": TrainVal,
"purpose_built_models_ds_convert": TrainVal,
"odconvert": ODConvert,
"pyt_odconvert": TrainVal,
"unetdatasetconvert": UNETDatasetConvert,
"odconvertindex": ODConvert,
"odconvertefficientdet_tf1": ODConvert,
"odconvertefficientdet_tf2": ODConvert,
"odaugment": ODAugment,
"data_services": TrainVal
}
| tao_front_end_services-main | api/handlers/actions.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API handlers module"""
| tao_front_end_services-main | api/handlers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML handler modules"""
import os
import json
from handlers import stateless_handlers
from handlers.stateless_handlers import get_handler_type, get_root
from handlers.utilities import Code
from handlers.docker_images import DOCKER_IMAGE_MAPPER
from job_utils.automl_job_utils import on_delete_automl_job
import uuid
from job_utils import executor as jobDriver
import sys
# TODO: Make sure the image name is the current docker tag of the API
image = DOCKER_IMAGE_MAPPER["api"]
class AutoMLHandler:
"""
Handler class for AutoML jobs
Start: Start controller as a K8s job
Stop: Stop controller and stop the autoML recommendation that is running
Resume: Same as Start (Since Controller's constructor allows for restore)
Delete: Same as AppHandler
Download: Same as AppHandler
Retrieve: Construct the JobSchema based on Controller's status.json
"""
@staticmethod
def start(user_id, model_id, handler_metadata):
"""Starts an automl job by running automl_start.py file"""
automl_job_id = str(uuid.uuid4())
root = get_root() + f"{user_id}/models/{model_id}/{automl_job_id}/"
if not os.path.exists(root):
os.makedirs(root)
network = get_handler_type(model_id)
automl_algorithm = handler_metadata.get("automl_algorithm", "Bayesian")
automl_max_recommendations = handler_metadata.get("automl_max_recommendations", 20)
automl_delete_intermediate_ckpt = handler_metadata.get("automl_delete_intermediate_ckpt", True)
automl_R = handler_metadata.get("automl_R", 27)
automl_nu = handler_metadata.get("automl_nu", 3)
metric = handler_metadata.get("metric", "map")
epoch_multiplier = handler_metadata.get("epoch_multiplier", 1)
automl_add_hyperparameters = handler_metadata.get("automl_add_hyperparameters", "")
automl_remove_hyperparameters = handler_metadata.get("automl_remove_hyperparameters", "")
# Call the script
print("Starting automl", automl_job_id, file=sys.stderr)
run_command = 'umask 0 && unzip -q /opt/ngccli/ngccli_linux.zip -d /opt/ngccli/ && /opt/ngccli/ngc-cli/ngc --version && '
run_command += f'/venv/bin/python3 automl_start.py --root={root} --automl_job_id={automl_job_id} --network={network} --model_id={model_id} --resume=False --automl_algorithm={automl_algorithm} --automl_max_recommendations={automl_max_recommendations} --automl_delete_intermediate_ckpt={automl_delete_intermediate_ckpt} --automl_R={automl_R} --automl_nu={automl_nu} --metric={metric} --epoch_multiplier={epoch_multiplier} --automl_add_hyperparameters="{automl_add_hyperparameters}" --automl_remove_hyperparameters="{automl_remove_hyperparameters}"'
jobDriver.create(automl_job_id, image, run_command, num_gpu=0) # TODO: Commented for testing only
# Test by directly calling the automl_start function blocking the flow
# automl_start(root,jc,False)
# This is the AutoML signature
# assert os.path.exists(root+"/controller.log")
return Code(201, [automl_job_id], "AutoML running")
@staticmethod
def stop(user_id, model_id, job_id):
"""Stops a running automl job"""
print("Stopping automl", file=sys.stderr)
try:
jobDriver.delete(job_id)
except:
return Code(404, [], "job cannot be stopped in platform")
# Remove any pending jobs from Workflow queue
try:
on_delete_automl_job(model_id, job_id)
except:
return Code(200, [job_id], "job cancelled, and no pending recommendations")
# TODO: Move the best model to weights/model.tlt
return Code(200, [job_id], "job cancelled")
@staticmethod
def retrieve(user_id, model_id, job_id):
"""Retrieves a running automl job and writes the stats to job_id.json in job_metadata folder"""
print("Retrieving automl", file=sys.stderr)
root = get_root() + f"{user_id}/models/{model_id}/{job_id}/"
stats = {}
json_file = os.path.join(root, "automl_metadata.json")
if not os.path.exists(json_file):
return Code(400, {}, "No AutoML run found")
try:
with open(json_file, "r", encoding='utf-8') as f:
stats = json.load(f)
except:
stats["message"] = "Stats will be updated in a few seconds"
# Check if job is running / error / done / pending
# Skipping this as automl pods are deleted upon completion and we can't use kubernetes api to fetch the status
# The status is updated in controller.py
# k8s_status = jobDriver.status(job_id)
# stateless_handlers.update_job_status(model_id,job_id,status=k8s_status)
# Create a JobResult schema and update the jobs_metadata/<automl_job_id>.json
path = os.path.join(stateless_handlers.get_root(), user_id, "models", model_id, "jobs_metadata", job_id + ".json")
job_meta = stateless_handlers.load_json_data(path)
print("job_meta", job_meta, file=sys.stderr)
job_meta["result"] = {}
job_meta["result"]["stats"] = []
job_meta["result"]["automl_result"] = []
for key, value in stats.items():
if "best_" in key:
job_meta["result"]["automl_result"].append({"metric": key, "value": value})
else:
job_meta["result"]["stats"].append({"metric": key, "value": str(value)})
with open(path, "w+", encoding='utf-8') as f:
f.write(json.dumps(job_meta, indent=4))
return Code(200, job_meta, "Job retrieved")
@staticmethod
def resume(user_id, model_id, job_id, handler_metadata):
"""Resumes a stopped automl job"""
print("Resuming automl", job_id, file=sys.stderr)
automl_job_id = job_id
root = get_root() + f"{user_id}/models/{model_id}/{automl_job_id}/"
if not os.path.exists(root):
os.makedirs(root)
network = get_handler_type(model_id)
automl_algorithm = handler_metadata.get("automl_algorithm", "Bayesian")
automl_max_recommendations = handler_metadata.get("automl_max_recommendations", 20)
automl_delete_intermediate_ckpt = handler_metadata.get("automl_delete_intermediate_ckpt", True)
automl_R = handler_metadata.get("automl_R", 27)
automl_nu = handler_metadata.get("automl_nu", 3)
metric = handler_metadata.get("metric", "map")
epoch_multiplier = handler_metadata.get("epoch_multiplier", 1)
automl_add_hyperparameters = handler_metadata.get("automl_add_hyperparameters", "")
automl_remove_hyperparameters = handler_metadata.get("automl_remove_hyperparameters", "")
# Call the script
run_command = f'/venv/bin/python3 automl_start.py --root={root} --automl_job_id={automl_job_id} --network={network} --model_id={model_id} --resume=True --automl_algorithm={automl_algorithm} --automl_max_recommendations={automl_max_recommendations} --automl_delete_intermediate_ckpt={automl_delete_intermediate_ckpt} --automl_R={automl_R} --automl_nu={automl_nu} --metric={metric} --epoch_multiplier={epoch_multiplier} --automl_add_hyperparameters="{automl_add_hyperparameters}" --automl_remove_hyperparameters="{automl_remove_hyperparameters}"'
jobDriver.create(job_id, image, run_command, num_gpu=0)
# This is the AutoML signature
# assert os.path.exists(root+"/controller.log")
print("Resume reached here", file=sys.stderr)
return Code(200, [automl_job_id], "AutoML resumed")
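# Illustrative lifecycle sketch (added commentary, not part of the original handler); user_id,
# model_id and handler_metadata are placeholders supplied by the caller, not values defined here.
#   response = AutoMLHandler.start(user_id, model_id, handler_metadata)    # Code(201, [automl_job_id], ...)
#   automl_job_id = response.data[0]
#   AutoMLHandler.retrieve(user_id, model_id, automl_job_id)               # Code(200, job_meta, ...)
#   AutoMLHandler.stop(user_id, model_id, automl_job_id)                   # Code(200, [automl_job_id], ...)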
| tao_front_end_services-main | api/handlers/automl_handler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Job chaining modules"""
import copy
from handlers.utilities import JobContext
from handlers.utilities import read_network_config
from handlers.stateless_handlers import get_handler_type, get_handler_metadata_with_jobs
class ChainingRules:
"""Class for defining rules of chaining jobs together"""
def __init__(self, chainable, chained_only):
"""Intialize ChainingRules class
Args:
chainable: defines chaining rules
chained_only: actions that fail without a parent job ID
"""
self._chainable = chainable
self._chained_only = chained_only
def chainable(self, child, parent):
"""Defines chaining runes"""
assert child in list(self._chainable.keys()), "Action not part of Chainer pipeline"
return parent in self._chainable[child]
def chained_only(self, action):
"""Returns actions that can run only with a parent job(chaining)"""
return action in self._chained_only
# Actions chaining rules are defined here
# Read as evaluate can be chained after ["train",..."export"]
_cvaction_rules = {"train": [],
"evaluate": ["train", "prune", "retrain", "export", "gen_trt_engine", "trtexec"],
"prune": ["train", "retrain"],
"inference": ["train", "prune", "retrain", "export", "gen_trt_engine", "trtexec"],
"inference_seq": ["train", "prune", "retrain", "export"],
"inference_trt": ["train", "prune", "retrain", "export"],
"retrain": ["train", "prune"],
"export": ["train", "prune", "retrain"],
"calibration_tensorfile": ["train", "prune", "retrain"],
"gen_trt_engine": ["export"],
"trtexec": ["export"],
"confmat": ["train", "prune", "retrain"]}
_cvaction_chainedonly = ["prune", "retrain", "export", "gen_trt_engine", "trtexec", "calibration_tensorfile"]
CVAction = ChainingRules(_cvaction_rules, _cvaction_chainedonly)
# OD Dataset chaining rules => Basically says that chaining does not matter
# NOTE: convert writes into tfrecords directory
ODAction = ChainingRules({"convert": ["augment"],
"convert_and_index": ["augment"],
"convert_efficientdet_tf1": ["augment"],
"convert_efficientdet_tf2": ["augment"],
"kmeans": [],
"augment": []}, [])
_dsaction_rules = {"generate": [],
"convert": [],
"validate": [],
"analyze": []}
DSAction = ChainingRules(_dsaction_rules, [])
CHAINING_RULES_TO_FUNCTIONS = {"cvaction": CVAction,
"dsaction": DSAction,
"odaction": ODAction}
def infer_action_from_job(handler_id, job_id):
"""Takes handler, job_id (UUID / str) and returns action corresponding to that jobID"""
job_id = str(job_id)
action = ""
all_jobs = get_handler_metadata_with_jobs(handler_id)["jobs"]
for job in all_jobs:
if job["id"] == job_id:
action = job["action"]
break
return action
def _create_job_contexts(parent_job_id, parent_action, actions, job_ids, network, chaining_rules, handler_id):
"""Create job contexts for the job_id's provided"""
job_contexts = []
for idx, jid in enumerate(job_ids):
job_id = str(jid)
action = actions[idx]
# Create a jobconext
job_context = JobContext(job_id, None, network, action, handler_id)
job_contexts.append(job_context)
completed_tasks_master = []
# See if parent_job is given
if parent_job_id:
completed_tasks_master = [(parent_job_id, parent_action)]
# Run actions one-by-one
for idx, action in enumerate(actions):
# Create a jobconext
job_context = job_contexts[idx]
job_id = job_context.id
completed_tasks_itr = copy.deepcopy(completed_tasks_master)
# Check for a proper parent job
for par_job in reversed(completed_tasks_itr):
par_job_id, par_action = par_job
if chaining_rules.chainable(action, par_action):
# Simply call the action Pipeline
job_context.parent_id = par_job_id
completed_tasks_master.append((job_id, action))
break
# If no proper parent job found
else:
# If action is only chained
if chaining_rules.chained_only(action):
job_context.status = "Error"
continue
# If action can be standalone with no parent.
# Simply call the action Pipeline
job_context.parent_id = None # Update parent JobID
completed_tasks_master.append((job_id, action)) # List of completed actions
# Update the job contexts after chainer parsing is done
for jc in job_contexts:
jc.write()
return job_contexts
def create_job_contexts(parent_job_id, actions, job_ids, handler_id):
"""Calls the create job contexts function after Obtains the necessary additional info fo"""
parent_action = infer_action_from_job(handler_id, parent_job_id)
network = get_handler_type(handler_id)
if not network:
return []
network_config = read_network_config(network)
chaining_rules = CHAINING_RULES_TO_FUNCTIONS[network_config["api_params"]["chaining_rules"]]
return _create_job_contexts(parent_job_id, parent_action, actions, job_ids, network, chaining_rules, handler_id)
| tao_front_end_services-main | api/handlers/chaining.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API Stateless handlers modules"""
import datetime
import glob
import json
import os
import functools
def get_root():
"""Return root path"""
return os.environ.get("TAO_ROOT", "/shared/users/")
# Sub for handler.root with handler_root(handler_id)
@functools.lru_cache(maxsize=256)
def get_handler_root(handler_id):
"""Return handler root path"""
pattern = get_root() + "**/**/**"
elements = glob.glob(pattern)
for ele in elements:
if os.path.basename(ele.rstrip("///")) == handler_id:
return ele
return ""
def get_handler_job_metadata(handler_id, job_id):
"""Return metadata info present in job_id.json inside jobs_metadata folder"""
# Only metadata of a particular job
handler_root = get_handler_root(handler_id)
job_metadata_file = handler_root + f"/jobs_metadata/{job_id}.json"
if not os.path.exists(job_metadata_file):
return {}
with open(job_metadata_file, "r", encoding='utf-8') as f:
metadata = json.load(f)
return metadata
def get_toolkit_status(handler_id, job_id):
"""Returns the status of the job reported from the frameworks container"""
metadata_info = get_handler_job_metadata(handler_id, job_id)
toolkit_status = ""
result_dict = metadata_info.get("result", "")
if result_dict:
toolkit_detailed_status = result_dict.get("detailed_status", "")
if toolkit_detailed_status:
toolkit_status = toolkit_detailed_status.get("status", "")
return toolkit_status
def json_serializable(response):
"""Check if response is json serializable"""
try:
json.dumps(response.json())
return True
except:
return False
# Sub for handler.spec_root with handler_root(handler_id)
def get_handler_spec_root(handler_id):
"""Return path of specs folder under handler_root"""
return get_handler_root(handler_id) + "/specs/"
# Sub for handler.metadata_file with handler_root(handler_id)
def get_handler_metadata_file(handler_id):
"""Return path of metadata.json under handler_root"""
return get_handler_root(handler_id) + "/metadata.json"
def get_handler_log_root(handler_id):
"""Return path of logs folder under handler_root"""
return get_handler_root(handler_id) + "/logs/"
def get_handler_jobs_metadata_root(handler_id):
"""Return path of job_metadata folder folder under handler_root"""
return get_handler_root(handler_id) + "/jobs_metadata/"
def load_json_data(json_file):
"""Read data from json file"""
metadata = {}
if os.path.exists(json_file):
with open(json_file, "r", encoding='utf-8') as f:
metadata = json.load(f)
return metadata
def get_handler_metadata(handler_id):
"""Return metadata info present in metadata.json inside handler_root"""
metadata_file = get_handler_metadata_file(handler_id)
metadata = load_json_data(metadata_file)
return metadata
def get_handler_metadata_with_jobs(handler_id):
"""Return a list of job_metadata info of multiple jobs"""
metadata = get_handler_metadata(handler_id)
metadata["jobs"] = []
job_metadatas_root = get_handler_jobs_metadata_root(handler_id)
for json_file in glob.glob(job_metadatas_root + "*.json"):
metadata["jobs"].append(load_json_data(json_file))
return metadata
def write_job_metadata(handler_id, job_id, metadata):
"""Write job metadata info present in jobs_metadata folder"""
handler_root = get_handler_root(handler_id)
job_metadata_file = handler_root + f"/jobs_metadata/{job_id}_tmp.json"
with open(job_metadata_file, "w+", encoding='utf-8') as f:
f.write(json.dumps(metadata, indent=4))
job_metadata_file_orig = handler_root + f"/jobs_metadata/{job_id}.json"
os.rename(job_metadata_file, job_metadata_file_orig)
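# Note (added commentary): metadata is first written to <job_id>_tmp.json and then renamed over
# <job_id>.json; on the same filesystem os.rename is atomic, so readers of jobs_metadata never
# observe a partially written file. A hypothetical caller:
#   write_job_metadata(handler_id, job_id, {"id": job_id, "status": "Running"})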
def update_job_status(handler_id, job_id, status):
"""Update the job status in jobs_metadata/job_id.json"""
metadata = get_handler_job_metadata(handler_id, job_id)
if status != metadata.get("status", ""):
metadata["last_modified"] = datetime.datetime.now().isoformat()
metadata["status"] = status
write_job_metadata(handler_id, job_id, metadata)
def update_job_results(handler_id, job_id, result):
"""Update the job results in jobs_metadata/job_id.json"""
metadata = get_handler_job_metadata(handler_id, job_id)
if result != metadata.get("result", {}):
metadata["last_modified"] = datetime.datetime.now().isoformat()
metadata["result"] = result
write_job_metadata(handler_id, job_id, metadata)
def get_handler_user(handler_id):
"""Return the user id for the handler id provided"""
# Get the handler_root in all the paths
handler_root = get_handler_root(handler_id)
# Remove any trailing slashes in the path and take the 3rd element from the end
return handler_root.rstrip("///").split("/")[-3]
def get_handler_type(handler_id):
"""Return the handler type"""
handler_metadata = get_handler_metadata(handler_id)
network = handler_metadata.get("network_arch", None)
if not network:
network = handler_metadata.get("type", None)
return network
def make_root_dirs(user_id, kind, handler_id):
"""Create root dir followed by logs, jobs_metadata and specs folder"""
root = get_root() + f"{user_id}/{kind}/{handler_id}/"
log_root = root + "logs/"
jobs_meta_root = root + "jobs_metadata/"
spec_root = root + "specs/"
for directory in [root, log_root, jobs_meta_root, spec_root]:
if not os.path.exists(directory):
os.makedirs(directory)
def check_existence(handler_id, kind):
"""Check if metadata.json exists"""
if kind not in ["dataset", "model"]:
return False
existence = bool(glob.glob(get_root() + f"**/{kind}s/{handler_id}/metadata.json"))
return existence
def check_read_access(user_id, handler_id):
"""Check if the user has read access to this particular handler"""
handler_user = get_handler_user(handler_id)
under_user = handler_user == user_id
handler_metadata = get_handler_metadata(handler_id)
public = handler_metadata.get("public", False) # Default is False
if under_user:
return True
if public:
return True
return False
def check_write_access(user_id, handler_id):
"""Check if the user has write access to this particular handler"""
handler_user = get_handler_user(handler_id)
under_user = handler_user == user_id
handler_metadata = get_handler_metadata(handler_id)
public = handler_metadata.get("public", False) # Default is False
read_only = handler_metadata.get("read_only", False) # Default is False
if under_user: # If under user, you can always write - no point in making it un-writable by owner. Read-only is for non-owners
return True
if public:
if read_only:
return False
return True
return False
def get_public_models():
"""Get public models"""
# Make sure to check if it exists
public_models = []
all_models_metadata = get_root() + "**/models/**/metadata.json"
for metadata_file in glob.glob(all_models_metadata):
metadata = load_json_data(metadata_file)
public = metadata.get("public", False)
if public:
public_models.append(metadata.get("id"))
return list(set(public_models))
def get_public_datasets():
"""Get public datasets"""
public_datasets = []
return list(set(public_datasets))
def add_public_model(model_id):
"""Add public model"""
# if model_id in get_public_models():
# return
return
def add_public_dataset(dataset_id):
"""Add public dataset"""
# if dataset_id in get_public_datasets():
# return
return
def remove_public_model(model_id):
"""Remove public model"""
# if model_id not in get_public_models():
# return
return
def remove_public_dataset(dataset_id):
"""Remove public dataset"""
# if dataset_id not in get_public_datasets():
# return
return
def check_dataset_type_match(user_id, model_meta, dataset_id, no_raw=None):
"""Checks if the dataset created for the model is valid dataset_type"""
# If dataset id is None, then return True
# Else, if all things match, return True
# True means replace, False means skip and return a 400 Code
if dataset_id is None:
return True
if not check_existence(dataset_id, "dataset"):
return False
if not check_read_access(user_id, dataset_id):
return False
dataset_meta = get_handler_metadata(dataset_id)
model_dataset_type = model_meta.get("dataset_type")
dataset_type = dataset_meta.get("type")
dataset_format = dataset_meta.get("format")
if model_dataset_type != dataset_type:
return False
if no_raw:
if dataset_format in ("raw", "coco_raw"):
return False
return True
def check_model_type_match(user_id, model_meta, ptm_ids):
"""Checks if the model created and ptm requested belong to the same network"""
if ptm_ids is None:
return True
for ptm_id in ptm_ids:
if not check_existence(ptm_id, "model"):
return False
if not check_read_access(user_id, ptm_id):
return False
ptm_meta = get_handler_metadata(ptm_id)
model_arch = model_meta.get("network_arch")
ptm_arch = ptm_meta.get("network_arch")
if model_arch != ptm_arch:
return False
return True
def model_update_handler_attributes(user_id, model_meta, key, value):
"""Checks if the artifact provided is of the correct type"""
# Returns value or False
if key in ["train_datasets"]:
if type(value) != list:
value = [value]
for dataset_id in value:
if not check_dataset_type_match(user_id, model_meta, dataset_id, no_raw=True):
return False
elif key in ["eval_dataset"]:
if not check_dataset_type_match(user_id, model_meta, value, no_raw=True):
return False
elif key in ["calibration_dataset", "inference_dataset"]:
if not check_dataset_type_match(user_id, model_meta, value):
return False
elif key in ["ptm"]:
if not check_model_type_match(user_id, model_meta, value):
return False
else:
return False
return value
| tao_front_end_services-main | api/handlers/stateless_handlers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper classes, functions, constants
Classes:
- Code
- JobContext
- StatusParser
Functions:
- run_system_command
- load_json_spec
- search_for_ptm
- get_ngc_download_command
- get_model_results_path
- write_nested_dict
- search_list_for_best_model
- build_cli_command
- read_network_config
Constants:
- VALID_DSTYPES
- VALID_NETWORKS
- IS_SPEC_NEEDED
"""
import os
import glob
import datetime
import json
import subprocess
import sys
import re
import uuid
import math
from handlers.stateless_handlers import get_handler_root, get_handler_job_metadata
# Helper Classes
class TAOResponse:
"""Helper class for API reponse"""
def __init__(self, code, data):
"""Initialize TAOResponse helper class"""
self.code = code
self.data = data
def Code(code, data={}, msg=""):
"""Wraps TAOResponse and returns appropriate responses"""
if code in [200, 201]:
return TAOResponse(code, data)
if code in [400, 404]:
error_data = {"error_desc": msg, "error_code": code}
return TAOResponse(code, error_data)
error_data = {"error_desc": msg, "error_code": code}
return TAOResponse(404, error_data)
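# Illustrative behaviour of the Code() wrapper (based on the branches above):
#   Code(201, ["job-uuid"], "AutoML running").data   # -> ["job-uuid"], with code 201
#   Code(400, {}, "No AutoML run found").data        # -> {"error_desc": "No AutoML run found", "error_code": 400}
#   Code(500, {}, "oops").code                       # -> 404 (any unrecognised code collapses to 404)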
class JobContext:
"""Class for holding job related information"""
# Initialize Job Related fields
# Contains API related parameters
# ActionPipeline interacts with Toolkit and uses this JobContext
def __init__(self, job_id, parent_id, network, action, handler_id, created_on=None):
"""Initialize JobContext class"""
# Non-state variables
self.id = job_id
self.parent_id = parent_id
self.network = network
self.action = action
self.handler_id = handler_id
self.created_on = created_on
if not self.created_on:
self.created_on = datetime.datetime.now().isoformat()
# State variables
self.last_modified = datetime.datetime.now().isoformat()
self.status = "Pending" # Starts off like this
self.result = {}
self.write()
def write(self):
"""Write the schema dict to jobs_metadata/job_id.json file"""
# Create a job metadata
job_metadata_file = get_handler_root(self.handler_id) + f"/jobs_metadata/{self.id}.json"
with open(job_metadata_file, "w", encoding='utf-8') as f:
f.write(json.dumps(self.schema(), indent=4))
def __repr__(self):
"""Returns the schema dict"""
return self.schema().__repr__()
# ModelHandler / DatasetHandler interacts with this function
def schema(self):
"""Creates schema dict based on the member variables"""
_schema = { # Cannot modify
"id": self.id,
"parent_id": self.parent_id,
"action": self.action,
"created_on": self.created_on,
# Can modify
"last_modified": self.last_modified,
"status": self.status,
"result": self.result}
return _schema
class StatusParser:
"""Class for parsing status.json"""
def __init__(self, status_file, network, results_dir):
"""Intialize StatusParser class"""
self.status_file = status_file
self.network = network
self.results_dir = results_dir
self.cur_line = 0
# Initialize results
self.results = {}
# Logging fields
self.results["date"] = ""
self.results["time"] = ""
self.results["status"] = ""
self.results["message"] = ""
# Categorical
self.results["categorical"] = {}
# KPI
self.results["kpi"] = {}
# Graphical
self.results["graphical"] = {}
self.last_seen_epoch = -1
#
self.gr_dict_cache = []
def _update_categorical(self, status_dict):
"""Update categorical key of status line"""
if "epoch" in status_dict:
self.last_seen_epoch = status_dict["epoch"]
if "cur_iter" in status_dict and self.network in _ITER_MODELS:
self.last_seen_epoch = status_dict["cur_iter"]
# Categorical
if "categorical" in status_dict:
cat_dict = status_dict["categorical"]
if type(cat_dict) != dict:
return
for _, value_dict in cat_dict.items():
if type(value_dict) != dict:
return
self.results["categorical"].update(cat_dict)
def _update_kpi(self, status_dict):
"""Update kpi key of status line"""
if "epoch" in status_dict:
self.last_seen_epoch = status_dict["epoch"]
if "cur_iter" in status_dict and self.network in _ITER_MODELS:
self.last_seen_epoch = status_dict["cur_iter"]
if "mode" in status_dict and status_dict["mode"] == "train":
return
if "kpi" in status_dict:
kpi_dict = status_dict["kpi"]
if type(kpi_dict) != dict:
return
for key, value in kpi_dict.items():
if type(value) == dict:
# Process it differently
float_value = StatusParser.force_float(value.get("value", None))
else:
float_value = StatusParser.force_float(value)
# Simple append to "values" if the list exists
if key in self.results["kpi"]:
# Metric info is present in duplicate lines for these networks
if self.network in ("efficientdet_tf1",):
if "epoch" in status_dict and float_value:
float_value = None
if float_value is not None:
if self.last_seen_epoch not in self.results["kpi"][key]["values"].keys():
self.results["kpi"][key]["values"][self.last_seen_epoch] = float_value
else:
if float_value is not None:
self.results["kpi"][key] = {"values": {self.last_seen_epoch: float_value}}
def _update_graphical(self, status_dict):
"""Update graphical key of status line"""
if "epoch" in status_dict:
self.last_seen_epoch = status_dict["epoch"]
if "cur_iter" in status_dict and self.network in _ITER_MODELS:
self.last_seen_epoch = status_dict["cur_iter"]
if "graphical" in status_dict:
gr_dict = status_dict["graphical"]
# If the exact same dict was seen before, skip (an artefact of how status logger is written)
if gr_dict in self.gr_dict_cache:
return
self.gr_dict_cache.append(gr_dict)
if type(gr_dict) != dict:
return
for key, value in gr_dict.items():
plot_helper_dict = {}
if type(value) == dict:
# Process it differently
float_value = StatusParser.force_float(value.get("value", None))
# Store x_min, x_max, etc... if given
for plot_helper_key in ["x_min", "x_max", "y_min", "y_max", "units"]:
if value.get(plot_helper_key):
plot_helper_dict[plot_helper_key] = value.get(plot_helper_key)
else:
float_value = StatusParser.force_float(value)
# Simple append to "values" if the list exists
if key in self.results["graphical"]:
if key == "mean average precision":
# Mean average precision info is present in duplicate lines for these networks
if self.network in ("dssd", "retinanet", "ssd", "yolo_v3", "yolo_v4", "yolo_v4_tiny"):
if "epoch" in status_dict and float_value:
float_value = None
if float_value is not None:
if self.last_seen_epoch not in self.results["graphical"][key]["values"].keys():
self.results["graphical"][key]["values"][self.last_seen_epoch] = float_value
else:
if self.last_seen_epoch not in self.results["graphical"][key]["values"].keys():
self.results["graphical"][key]["values"][self.last_seen_epoch] = float_value
else:
if (key != "mean average precision") or (key == "mean average precision" and float_value):
self.results["graphical"][key] = {"values": {self.last_seen_epoch: float_value}}
if key in self.results["graphical"]:
# Put together x_min, x_max, y_min, y_max
graph_key_vals = self.results["graphical"][key]["values"]
self.results["graphical"][key].update({"x_min": 0,
"x_max": len(graph_key_vals),
"y_min": 0,
"y_max": StatusParser.force_max([val for key, val in graph_key_vals.items()]),
"units": None})
# If given in value, then update x_min, x_max, etc...
self.results["graphical"][key].update(plot_helper_dict)
@staticmethod
def force_float(value):
"""Convert str to float"""
try:
if (type(value) == str and value.lower() in ["nan", "infinity", "inf"]) or (type(value) == float and (math.isnan(value) or value == float('inf') or value == float('-inf'))):
return None
return float(value)
except:
return None
@staticmethod
def force_min(values):
"""Return min elements in the list"""
values_no_none = [val for val in values if val is not None]
if values_no_none != []:
return min(values_no_none)
return 0
@staticmethod
def force_max(values):
"""Return max elements in the list"""
values_no_none = [val for val in values if val is not None]
if values_no_none != []:
return max(values_no_none)
return 1e10
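# Small illustrative examples for the static helpers above (added commentary only):
#   StatusParser.force_float("0.85")      # -> 0.85
#   StatusParser.force_float("NaN")       # -> None (NaN/inf strings are rejected)
#   StatusParser.force_min([None, 3, 1])  # -> 1
#   StatusParser.force_max([None])        # -> 1e10 (fallback when no numeric values exist)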
def post_process_results(self):
"""Post process the status.json contents to be compatible with defined schema's in app.py"""
# Copy the results
processed_results = {}
# Detailed results
processed_results["detailed_status"] = {}
for key in ["date", "time", "status", "message"]:
processed_results["detailed_status"][key] = self.results[key]
# Categorical
processed_results["categorical"] = []
for key, value_dict in self.results["categorical"].items():
value_dict_unwrapped = [{"category": cat, "value": StatusParser.force_float(val)} for cat, val in value_dict.items()]
processed_results["categorical"].append({"metric": key, "category_wise_values": value_dict_unwrapped})
# KPI and Graphical
for result_type in ("kpi", "graphical"):
processed_results[result_type] = []
for key, value_dict in self.results[result_type].items():
dict_schema = {"metric": key}
dict_schema.update(value_dict)
processed_results[result_type].append(dict_schema)
# Continuous remain the same
for key in ["cur_iter", "epoch", "max_epoch", "eta", "time_per_epoch", "time_per_iter"]:
processed_results[key] = self.results.get(key, None)
return processed_results
def update_results(self):
"""Update results in status.json"""
if not os.path.exists(self.status_file):
# Try to find out status.json
sjsons = glob.glob(self.results_dir + "/**/status.json", recursive=True)
if sjsons:
self.status_file = sjsons[0]
# Read all the status lines in status.json till now
good_statuses = []
if os.path.exists(self.status_file):
with open(self.status_file, "r", encoding='utf-8') as f:
lines_to_process = f.readlines()[self.cur_line:]
for line in lines_to_process:
try:
status_dict = json.loads(str(line))
good_statuses.append(status_dict)
except:
continue
self.cur_line += 1
for status_dict in good_statuses:
# Logging fields
for key in ["date", "time", "status", "message"]:
if key in status_dict:
self.results[key] = status_dict[key]
# Categorical
self._update_categorical(status_dict)
# KPI
self._update_kpi(status_dict)
# Graphical
self._update_graphical(status_dict)
# Continuous
for key in status_dict:
if key in ["cur_iter", "epoch", "max_epoch", "eta", "time_per_epoch", "time_per_iter"]:
# verbosity is an additional status.json variable API does not process
self.results[key] = status_dict[key]
return self.post_process_results()
# Helper Functions
def load_json_spec(spec_json_path):
"""Load json and delete version key if present in the csv specs"""
try:
spec = {}
with open(spec_json_path, mode='r', encoding='utf-8-sig') as f:
spec = json.load(f)
if spec.get("version"):
del spec["version"]
return spec
except:
return {}
def run_system_command(command):
"""
Run a linux command - similar to os.system().
Waits till process ends.
"""
subprocess.run(['/bin/bash', '-c', command], stdout=subprocess.PIPE, check=False)
return 0
def search_for_ptm(root, extension="tlt", network=""):
"""Return path of the PTM file under the PTM root folder"""
# from root, return model
# if return is None, that means not hdf5 or tlt inside the folder
# search for hdf5 / tlt
# EfficientDet tf2 PTM is not a single file
if network in ["classification_tf2", "efficientdet_tf2"]:
pretrained_root_folder_map = {"classification_tf2": "pretrained_classification_tf2_vefficientnet_b0",
"efficientdet_tf2": "pretrained_efficientdet_tf2_vefficientnet_b0"}
if len(glob.glob(root + "/**/*")) > 0:
return os.path.join(root, pretrained_root_folder_map[network])
return None
models = glob.glob(root + "/**/*.tlt", recursive=True) + glob.glob(root + "/**/*.hdf5", recursive=True) + glob.glob(root + "/**/*.pth", recursive=True) + glob.glob(root + "/**/*.pth.tar", recursive=True)
# if .tlt exists
if models:
model_path = models[0] # pick one arbitrarily
return model_path
# if no .tlt exists
return None
def get_ngc_download_command(root):
"""Frames a ngc command to download the PTM's from ngc"""
# check if metadata exists
metadata = glob.glob(root + "/metadata.json")
if not metadata:
return None
metadata = metadata[0]
# read metadata ngc_path
with open(metadata, "r", encoding='utf-8') as f:
meta_data = json.load(f)
ngc_path = meta_data.get("ngc_path", "")
network_arch = meta_data.get("network_arch", "")
additional_id_info = meta_data.get("additional_id_info", "")
# if no ngc path
if ngc_path == "":
return None
# if ngc path, then download the model into some place inside root and then return a path to hdf5 / tlt
cmnd = f"TMPDIR=$(mktemp -d) && ngc registry model download-version --dest $TMPDIR/ {ngc_path} && chmod -R 777 $TMPDIR && cp -r $TMPDIR/* {root}/ && rm -rf $TMP_DIR"
# run and wait till it finishes / run in background
print("Executing NGC command: ", cmnd, file=sys.stderr)
return cmnd, network_arch, additional_id_info
def download_ptm(handler_ptm):
"""Calls the ngc model download command and removes the unnecessary files for some models containing multiple model files"""
if handler_ptm is None:
return None
ptm_root = get_handler_root(handler_ptm)
ptm_file = search_for_ptm(ptm_root)
if ptm_file is None:
ptm_download_command, network_arch, additional_id_info = get_ngc_download_command(ptm_root) # this will not be None since we check this earlier
subprocess.run(['/bin/bash', '-c', 'HOME=/var/www/ && ' + ptm_download_command], stdout=subprocess.PIPE, check=False)
# if the download process failed => then ptm_file is None and we proceed without a ptm (because if ptm does not exist in ngc, it must not be loaded!)
ptm_file = search_for_ptm(ptm_root, network=network_arch)
if network_arch == "lprnet":
if additional_id_info == "us":
os.system(f"rm {ptm_root}/lprnet_vtrainable_v1.0/*ch_*")
elif additional_id_info == "ch":
os.system(f"rm {ptm_root}/lprnet_vtrainable_v1.0/*us_*")
elif network_arch == "action_recognition":
additional_id_info_list = additional_id_info.split(",")
if len(additional_id_info_list) == 1:
if additional_id_info_list[0] == "3d":
os.system(f"rm {ptm_root}/actionrecognitionnet_vtrainable_v1.0/*_2d_*")
elif additional_id_info_list[0] == "2d":
os.system(f"rm {ptm_root}/actionrecognitionnet_vtrainable_v1.0/*_3d_*")
if len(additional_id_info_list) == 2:
for ind_additional_id_info in additional_id_info_list:
if ind_additional_id_info == "a100":
os.system(f"rm {ptm_root}/actionrecognitionnet_vtrainable_v2.0/*xavier*")
elif ind_additional_id_info == "xavier":
os.system(f"rm {ptm_root}/actionrecognitionnet_vtrainable_v2.0/*a100*")
if ind_additional_id_info == "3d":
os.system(f"rm {ptm_root}/actionrecognitionnet_vtrainable_v2.0/*_2d_*")
elif ind_additional_id_info == "2d":
os.system(f"rm {ptm_root}/actionrecognitionnet_vtrainable_v2.0/*_3d_*")
return ptm_file
return ptm_file
def write_nested_dict(dictionary, key_dotted, value):
"""Merge 2 dicitonaries"""
ptr = dictionary
keys = key_dotted.split(".")
for key in keys[:-1]:
# This applies to the classwise_config case that save the configs with list
if type(ptr) != dict:
temp = {}
for ptr_dic in ptr:
temp.update(ptr_dic)
ptr = temp
ptr = ptr.setdefault(key, {})
ptr[keys[-1]] = value
def write_nested_dict_if_exists(target_dict, nested_key, source_dict, key):
"""Merge 2 dicitonaries if given key exists in the source dictionary"""
if key in source_dict:
write_nested_dict(target_dict, nested_key, source_dict[key])
# if key is not there, no update
def read_nested_dict(dictionary, flattened_key):
"""Returns the value of a flattened key separated by dots"""
for key in flattened_key.split("."):
value = dictionary[key]
dictionary = value
return value
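# Illustrative sketch of the nested-dict helpers above (the spec dict here is made up):
#   spec = {"train": {"num_epochs": 10}}
#   write_nested_dict(spec, "train.optimizer.lr", 0.01)
#   # spec is now {"train": {"num_epochs": 10, "optimizer": {"lr": 0.01}}}
#   read_nested_dict(spec, "train.optimizer.lr")   # -> 0.01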
def build_cli_command(config_data, spec_data=None):
"""Generate cli command from the values of config_data"""
# data is a dict
# cmnd generates --<field_name> <value> for all key,value in data
# Usage: To generate detectnet_v2 train --<> <> --<> <>,
# The part after detectnet_v2 train is generated by this
cmnd = ""
for key, value in config_data.items():
assert (type(value) != dict)
assert (type(value) != list)
if type(value) == bool:
if value:
cmnd += f"--{key} "
else:
cmnd += f"--{key}={value} "
return cmnd
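# Illustrative example of the CLI string this helper produces (config values made up):
#   build_cli_command({"gpus": 2, "verbose": True, "use_amp": False})
#   # -> "--gpus=2 --verbose "  (True flags become bare switches, False flags are dropped)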
def read_network_config(network):
"""Reads the network handler json config file"""
# CLONE EXISTS AT pretrained_models.py
_dir_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
config_json_path = os.path.join(_dir_path, "handlers", "network_configs", f"{network}.config.json")
cli_config = {}
with open(config_json_path, mode='r', encoding='utf-8-sig') as f:
cli_config = json.load(f)
return cli_config
# NOTE Deprecated function, but still used until classwise config is needed
# for any network - this follows dnv2 schema.
def process_classwise_config(data):
"""Modifies data to re-organize the classwise config into respective collections
Args: data: spec with classwise_config
Return: data
"""
if "classwise_config" not in data:
return data
if type(data["classwise_config"]) != list:
data["classwise_config"] = [data["classwise_config"]]
# see if top level conf names exist, if not create it => ideally we want all of these names to exist
for conf_name in ["bbox_rasterizer_config", "postprocessing_config", "cost_function_config", "evaluation_config"]:
if data.get(conf_name) is None:
data[conf_name] = {}
data["bbox_rasterizer_config"]["target_class_config"] = []
data["postprocessing_config"]["target_class_config"] = []
data["cost_function_config"]["target_classes"] = []
data["evaluation_config"]["minimum_detection_ground_truth_overlap"] = []
data["evaluation_config"]["evaluation_box_config"] = []
for class_name in data["classwise_config"]:
bbox_dict = {"key": class_name["key"], "value:": class_name["value"]["bbox_rasterizer_config"]}
data["bbox_rasterizer_config"]["target_class_config"].append(bbox_dict)
post_dict = {"key": class_name["key"], "value:": class_name["postprocessing_config"]}
data["postprocessing_config"]["target_class_config"].append(post_dict)
cost_dict = {"name": class_name["key"]}
cost_dict.update(class_name["value"]["cost_function_config"])
data["cost_function_config"]["target_classes"].append(cost_dict)
eval_dict_det = {"key": class_name["key"], "value": class_name["value"]["evaluation_config"]["minimum_detection_ground_truth_overlap"]}
data["evaluation_config"]["minimum_detection_ground_truth_overlap"].append(eval_dict_det)
eval_dict_conf = {"key": class_name["key"], "value": class_name["value"]["evaluation_config"]["evaluation_box_config"]}
data["evaluation_config"]["evaluation_box_config"].append(eval_dict_conf)
del data["classwise_config"]
return data
def _check_gpu_conditions(field_name, field_value):
if not field_value:
raise ValueError("GPU related value not set")
available_gpus = int(os.getenv("NUM_GPU_PER_NODE", "0"))
if field_name in ("gpus", "num_gpus"):
if int(field_value) < 0:
raise ValueError("GPU related value requested is negative")
if int(field_value) > available_gpus:
raise ValueError(f"GPUs requested count of {field_value} is greater than gpus made available during deployment {available_gpus}")
if field_name in ("gpu_ids", "gpu_id"):
available_gpu_ids = set(range(0, available_gpus))
requested_gpu_ids = set(field_value)
if not requested_gpu_ids.issubset(available_gpu_ids):
raise ValueError(f"GPU ids requested is {str(requested_gpu_ids)} but available gpu ids are {str(available_gpu_ids)}")
def validate_gpu_param_value(spec):
"""Validate the gpus requested"""
for gpu_param_name in ("gpus", "num_gpus", "gpu_ids", "gpu_id"):
if gpu_param_name in spec.keys():
field_name = gpu_param_name
field_value = spec[gpu_param_name]
_check_gpu_conditions(field_name, field_value)
if "train" in spec.keys() and gpu_param_name in spec["train"].keys():
field_name = gpu_param_name
field_value = spec["train"][gpu_param_name]
_check_gpu_conditions(field_name, field_value)
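# Illustrative behaviour (assuming the deployment exposed NUM_GPU_PER_NODE=2):
#   validate_gpu_param_value({"gpus": 2})                     # passes silently
#   validate_gpu_param_value({"gpus": 4})                     # raises ValueError (more than available)
#   validate_gpu_param_value({"train": {"gpu_ids": [0, 3]}})  # raises ValueError (id 3 not in {0, 1})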
def validate_uuid(user_id=None, dataset_id=None, job_id=None, model_id=None):
"""Validate possible UUIDs"""
if user_id:
try:
uuid.UUID(user_id)
except:
return "User ID passed is not a valid UUID"
if dataset_id:
try:
uuid.UUID(dataset_id)
except:
return "Dataset ID passed is not a valid UUID"
if job_id:
try:
uuid.UUID(job_id)
except:
return "Job ID passed is not a valid UUID"
if model_id:
try:
uuid.UUID(model_id)
except:
return "Model ID passed is not a valid UUID"
return ""
def latest_model(files, delimiters="_", extensions=[".tlt", ".hdf5", ".pth"]):
"""Returns the latest generated model file based on epoch number"""
cur_best = 0
best_model = "model.tlt"
for file in files:
_, file_extension = os.path.splitext(file)
if file_extension not in extensions:
continue
model_name = file
for extension in extensions:
model_name = re.sub(f"{extension}$", "", model_name)
delimiters_list = delimiters.split(",")
if len(delimiters_list) > 1:
delimiters_list = delimiters_list[0:-1]
for delimiter in delimiters_list:
epoch_num = model_name.split(delimiter)[-1]
model_name = epoch_num
if len(delimiters) > 1:
epoch_num = model_name.split(delimiters[-1])[0]
try:
epoch_num = int(epoch_num)
except:
epoch_num = 0
if epoch_num >= cur_best:
cur_best = epoch_num
best_model = file
return best_model
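# Illustrative example with the default "_" delimiter (file names made up):
#   latest_model(["model_002.tlt", "model_010.tlt", "events.log"])   # -> "model_010.tlt"
# Files with non-model extensions are skipped and the highest trailing epoch number wins.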
def search_list_for_best_model(files):
"""Returns the latest model based on anticipated model name using regex"""
cur_best = 0
best_model = "model.tlt"
for file_name in files:
# Patterns to look for
# detectnet_v2, unet: model.tlt
# frcnn: model.epoch.<>.tlt
# Define regex rules for a potential model file
model_name_regex = re.compile(r'(trained-)?(finetuned-)?model(-)?(.epoch)?([0-9]+)?.tlt')
# Identify if current file is a model file
model_name_out = model_name_regex.search(file_name)
# If it is a valid file, proceed. Else continue the loop
if model_name_out:
# Try to extract the integer epoch string from the model file name
model_pattern = model_name_out.group()
model_stamp_regex = re.compile(r'[\d]+')
model_stamp_out = model_stamp_regex.search(model_pattern)
# Get the epoch number. If model_stamp_out is None, it means the file name is model.tlt, etc...
model_number = 0
if model_stamp_out:
model_number = int(model_stamp_out.group())
# If model's epoch better than current best, make that the best model
# Useful when output directory has checkpoints
if model_number >= cur_best:
cur_best = model_number
best_model = model_pattern
return best_model
def get_model_results_path(handler_metadata, job_id):
"""Returns path of the model based on the action of the job"""
if job_id is None:
return None
network = handler_metadata.get("network_arch")
handler_id = handler_metadata.get("id")
root = get_handler_root(handler_id)
action = get_handler_job_metadata(handler_id, job_id).get("action")
automl_path = ""
if handler_metadata.get("automl_enabled") is True and action == "train":
automl_path = "best_model"
if action == "retrain":
action = "train"
if action == "train":
res_root = os.path.join(root, str(job_id), automl_path)
if os.path.exists(res_root + "/weights") and len(os.listdir(res_root + "/weights")) > 0:
res_root = os.path.join(res_root, "weights")
if os.path.exists(os.path.join(res_root, action)):
res_root = os.path.join(res_root, action)
if os.path.exists(res_root):
# If epoch number is baked into tlt output as <yada_yada>_<epoch_number>.tlt
if network in ("classification_tf1", "classification_tf2", "classification_pyt", "efficientdet_tf2", "faster_rcnn", "multitask_classification", "dssd", "ssd", "retinanet", "yolo_v3", "yolo_v4", "yolo_v4_tiny", "segformer", "pointpillars"):
result_file = res_root + "/" + latest_model(os.listdir(res_root))
# If it follows model.tlt pattern with epoch number
elif network in ("detectnet_v2", "lprnet", "efficientdet_tf1", "mask_rcnn", "unet", "bpnet", "fpenet"):
result_file = res_root + "/" + latest_model(os.listdir(res_root), delimiters="-")
elif network in _PYT_CV_NETWORKS:
result_file = res_root + "/" + latest_model(os.listdir(res_root), delimiters="=")
else:
result_file = res_root + "/" + search_list_for_best_model(os.listdir(res_root))
else:
result_file = None
elif action == "prune":
result_file = (glob.glob(f"{os.path.join(root, str(job_id))}/**/*.tlt", recursive=True) + glob.glob(f"{os.path.join(root, str(job_id))}/**/*.hdf5", recursive=True) + glob.glob(f"{os.path.join(root, str(job_id))}/**/*.pth", recursive=True))[0]
elif action == "export":
result_file = (glob.glob(f"{os.path.join(root, str(job_id))}/**/*.onnx", recursive=True) + glob.glob(f"{os.path.join(root, str(job_id))}/**/*.uff", recursive=True))[0]
elif action in ("trtexec", "gen_trt_engine"):
result_file = os.path.join(root, str(job_id), "model.engine")
if not os.path.exists(result_file):
result_file = os.path.join(root, str(job_id), action, "model.engine")
else:
result_file = None
return result_file
# Helper constants
_OD_NETWORKS = set(["detectnet_v2", "faster_rcnn", "yolo_v3", "yolo_v4", "yolo_v4_tiny", "dssd", "ssd", "retinanet", "efficientdet_tf1", "efficientdet_tf2", "deformable_detr", "dino"])
_PURPOSE_BUILT_MODELS = set(["action_recognition", "ml_recog", "ocdnet", "ocrnet", "optical_inspection", "pose_classification", "re_identification"])
_TF1_NETWORKS = set(["detectnet_v2", "faster_rcnn", "yolo_v4", "yolo_v4_tiny", "yolo_v3", "dssd", "ssd", "retinanet", "unet", "mask_rcnn", "lprnet", "classification_tf1", "efficientdet_tf1", "multitask_classification", "bpnet", "fpenet"])
_TF2_NETWORKS = set(["classification_tf2", "efficientdet_tf2"])
_PYT_TAO_NETWORKS = set(["action_recognition", "deformable_detr", "dino", "mal", "ml_recog", "ocdnet", "ocrnet", "optical_inspection", "pointpillars", "pose_classification", "re_identification", "segformer"])
_PYT_PLAYGROUND_NETWORKS = set(["classification_pyt"])
_PYT_CV_NETWORKS = _PYT_TAO_NETWORKS | _PYT_PLAYGROUND_NETWORKS
VALID_DSTYPES = ("object_detection", "semantic_segmentation", "image_classification",
"instance_segmentation", "character_recognition", # CV
"bpnet", "fpenet", # DRIVEIX
"action_recognition", "ml_recog", "ocdnet", "ocrnet", "optical_inspection", "pointpillars", "pose_classification", "re_identification") # PYT CV MODELS
VALID_NETWORKS = ("detectnet_v2", "faster_rcnn", "yolo_v4", "yolo_v4_tiny", "yolo_v3", "dssd", "ssd", "retinanet",
"unet", "mask_rcnn", "lprnet", "classification_tf1", "classification_tf2", "efficientdet_tf1", "efficientdet_tf2", "multitask_classification",
"bpnet", "fpenet", # DRIVEIX
"action_recognition", "classification_pyt", "mal", "ml_recog", "ocdnet", "ocrnet", "optical_inspection", "pointpillars", "pose_classification", "re_identification", "deformable_detr", "dino", "segformer", # PYT CV MODELS
"annotations", "analytics", "augmentation", "auto_label") # Data_Service tasks.
NO_SPEC_ACTIONS_MODEL = ("evaluate", "retrain", "inference", "inference_seq", "inference_trt") # Actions with **optional** specs
NO_PTM_MODELS = set([])
_ITER_MODELS = ("segformer")
AUTOML_DISABLED_NETWORKS = ["mal"]
| tao_front_end_services-main | api/handlers/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions to infer values
Each function takes as input:
- handler (dataset / model)
- app_handler
- job_context
"""
import os
import glob
import sys
import shutil
import uuid
from handlers.utilities import search_for_ptm, get_model_results_path, read_network_config
from handlers.stateless_handlers import get_handler_root, get_handler_spec_root, get_handler_job_metadata, load_json_data
def infer_verbose(job_context, handler_metadata):
"""Return True to enable verbose commands"""
return True
def infer_key(job_context, handler_metadata):
"""Returns the encryption key associated with the model"""
try:
return handler_metadata.get("encryption_key", "tlt_encode")
except:
return None
def infer_output_dir(job_context, handler_metadata):
"""Creates output directory within handler root"""
job_id = str(job_context.id)
outroot = get_handler_root(handler_metadata.get("id"))
results_dir = os.path.join(outroot, job_id)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
return results_dir + "/"
def infer_spec_file(job_context, handler_metadata):
"""Returns path of the spec file of a job"""
network = job_context.network
network_config = read_network_config(network)
api_params = network_config.get("api_params", {})
spec_root = get_handler_spec_root(handler_metadata.get("id"))
job_id = str(job_context.id)
if job_context.action == "convert_efficientdet_tf2":
spec_path = os.path.join(spec_root, job_id + ".yaml")
else:
spec_path = os.path.join(spec_root, job_id + "." + api_params["spec_backend"])
return spec_path
# NOTE: Only supports those with ngc_path to be PTMs
def infer_ptm(job_context, handler_metadata):
"""Returns a list of path of the ptm files of a network"""
network = job_context.network
handler_ptms = handler_metadata.get("ptm", None)
if handler_ptms is None:
return None
ptm_file = []
for handler_ptm in handler_ptms:
if handler_ptm:
ptm_root = get_handler_root(handler_ptm)
ptm_file.append(search_for_ptm(ptm_root, network=network))
return ",".join(ptm_file)
def infer_pruned_model(job_context, handler_metadata):
"""Returns path of the pruned model"""
handler_root = get_handler_root(handler_metadata.get("id"))
if not handler_root:
return None
if handler_metadata["network_arch"] in ("efficientdet_tf2", "classification_tf2"):
return os.path.join(handler_root, job_context.id, "pruned_model.tlt")
pruned_model = os.path.join(handler_root, job_context.parent_id, "pruned_model.tlt")
if os.path.exists(pruned_model):
return pruned_model
if os.path.exists(pruned_model.replace(".tlt", ".pth")):
return pruned_model.replace(".tlt", ".pth")
if os.path.exists(pruned_model.replace(".tlt", ".hdf5")):
return pruned_model.replace(".tlt", ".hdf5")
return None
def infer_parent_model(job_context, handler_metadata):
"""Returns path of the weight file of the parent job"""
parent_model = get_model_results_path(handler_metadata, job_context.parent_id)
if os.path.exists(str(parent_model)):
return parent_model
return None
def infer_resume_model(job_context, handler_metadata):
"""Returns path of the weight file of the current job"""
parent_model = get_model_results_path(handler_metadata, job_context.id)
if os.path.exists(str(parent_model)):
return parent_model
return None
def infer_resume_model_or_ptm(job_context, handler_metadata):
"""Returns path of the weight file of the current job if exists else returns path of the ptm files"""
resume_model = infer_resume_model(job_context, handler_metadata)
if resume_model:
return resume_model
return infer_ptm(job_context, handler_metadata)
def infer_ptm_if_no_resume_model(job_context, handler_metadata):
"""Returns path of path of the ptm files if there is no model to resume"""
resume_model = infer_resume_model(job_context, handler_metadata)
if resume_model:
return None
return infer_ptm(job_context, handler_metadata)
def infer_automl_assign_ptm(job_context, handler_metadata, job_root, rec_number):
"""Returns path of path of the ptm files if there is no model to resume for AutoML"""
expt_root = infer_automl_output_dir(job_context, handler_metadata, job_root, rec_number)
resume_model = glob.glob(expt_root + "/**/*.tlt", recursive=True) + glob.glob(expt_root + "/**/*.hdf5", recursive=True) + glob.glob(expt_root + "/**/*.pth", recursive=True)
if not resume_model:
return infer_ptm(job_context, handler_metadata)
return None
def infer_automl_resume_model(job_context, handler_metadata, job_root, rec_number):
"""Returns path of the checkpoint file for the automl recommendation to resume on"""
expt_root = infer_automl_output_dir(job_context, handler_metadata, job_root, rec_number)
resume_model = glob.glob(expt_root + "/**/*.tlt", recursive=True) + glob.glob(expt_root + "/**/*.hdf5", recursive=True) + glob.glob(expt_root + "/**/*.pth", recursive=True)
resume_model.sort(reverse=False)
if resume_model:
resume_model = resume_model[0]
return resume_model
def infer_automl_ptm_if_no_resume_model(job_context, handler_metadata, job_root, rec_number):
"""Returns path of the checkpoint file for the automl recommendation to resume on"""
expt_root = infer_automl_output_dir(job_context, handler_metadata, job_root, rec_number)
resume_model = glob.glob(expt_root + "/**/*.tlt", recursive=True) + glob.glob(expt_root + "/**/*.hdf5", recursive=True) + glob.glob(expt_root + "/**/*.pth", recursive=True)
resume_model.sort(reverse=False)
if resume_model:
return resume_model[0]
return infer_ptm(job_context, handler_metadata)
def infer_automl_experiment_spec(job_context, handler_metadata, job_root, rec_number):
"""Returns path automl spec file"""
network = job_context.network
network_config = read_network_config(network)
api_params = network_config.get("api_params", {})
experiment_spec = f"{job_root}/recommendation_{rec_number}.{api_params['spec_backend']}"
return experiment_spec
def infer_automl_assign_resume_epoch(job_context, handler_metadata, job_root, rec_number):
"""Returns path automl spec file"""
additional_epoch = 0
if job_context.network != "efficientdet_tf2":
additional_epoch = 1 # epoch numbers indexed by 1
resume_epoch_number = 0 + additional_epoch
if infer_automl_resume_model(job_context, handler_metadata, job_root, rec_number):
brain_dict = load_json_data(json_file=f"{job_root}/brain.json")
resume_epoch_number = int(brain_dict.get("resume_epoch_number", -1)) + additional_epoch
return resume_epoch_number
def infer_automl_output_dir(job_context, handler_metadata, job_root, rec_number):
"""Returns path of the automl experiment folder"""
expt_root = os.path.join(job_root, f"experiment_{rec_number}/")
return expt_root
def infer_parent_model_evaluate(job_context, handler_metadata):
"""Returns path of the weight file of the parent job if exists else returns path of the ptm files"""
# Assumes: <results_dir/weights> is stored
# If extension is None: output is based on RESULTS_RELPATH
# If extension exists, then search for that extension
parent_job_id = job_context.parent_id
handler_id = handler_metadata.get("id")
parent_action = get_handler_job_metadata(handler_id, parent_job_id).get("action")
if parent_action == "export":
parent_model = os.path.join(get_handler_root(handler_metadata.get("id")), str(job_context.parent_id), "model.engine")
else:
parent_model = get_model_results_path(handler_metadata, job_context.parent_id)
if os.path.exists(str(parent_model)):
return parent_model
# This means, running eval without a parent => eval a PTM!
# It is the duty of user to have given a PTM. Else job will error out without launching.
ptm = infer_ptm(job_context, handler_metadata)
return ptm
def infer_framework_evaluate(job_context, handler_metadata):
"""Returns framework to evaluate model on based on the parent action"""
parent_job_id = job_context.parent_id
handler_id = handler_metadata.get("id")
parent_action = get_handler_job_metadata(handler_id, parent_job_id).get("action")
if parent_action == "export":
return "tensorrt"
return "tlt"
def infer_framework_evaluate_storetrue(job_context, handler_metadata):
"""Returns whether the evaluation framework is tensorrt or not"""
framework = infer_framework_evaluate(job_context, handler_metadata)
return framework == "tensorrt"
def infer_output_file(job_context, handler_metadata, extension):
"""Create output folder based on the filepath"""
# Create all directories up until the file name
outdir = infer_output_dir(job_context, handler_metadata)
path = os.path.join(outdir, extension)
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
return path
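# Illustrative sketch: the wrappers below simply pass a relative file name, e.g.
#   infer_output_hdf5(job_context, handler_metadata)
#   # -> "<handler_root>/<job_id>/model.hdf5" (with intermediate directories created)
# where <handler_root>/<job_id> is the per-job results dir produced by infer_output_dir above.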
def infer_output_hdf5(job_context, handler_metadata):
"""Calls infer_output_file for model.hdf5"""
return infer_output_file(job_context, handler_metadata, "model.hdf5")
def infer_output_pth(job_context, handler_metadata):
"""Calls infer_output_file for model.pth"""
return infer_output_file(job_context, handler_metadata, "model.pth")
def infer_output_onnx(job_context, handler_metadata):
"""Calls infer_output_file for model.onnx"""
return infer_output_file(job_context, handler_metadata, "model.onnx")
def infer_output_trt(job_context, handler_metadata):
"""Calls infer_output_file for model.engine"""
return infer_output_file(job_context, handler_metadata, "model.engine")
def infer_output_weights_tlt(job_context, handler_metadata):
"""Calls infer_output_file for weights/model.tlt"""
return infer_output_file(job_context, handler_metadata, "weights/model.tlt")
def infer_merged_json(job_context, handler_metadata):
"""Calls infer_output_file for merged.json"""
return infer_output_file(job_context, handler_metadata, "merged.json")
def infer_cal_cache(job_context, handler_metadata):
"""Calls infer_output_file for cal.bin"""
return infer_output_file(job_context, handler_metadata, "cal.bin")
def infer_cal_data_file(job_context, handler_metadata):
"""Calls infer_output_file for calibration.tensorfile"""
return infer_output_file(job_context, handler_metadata, "calibration.tensorfile")
def infer_create_inference_result_file_pose(job_context, handler_metadata):
"""Calls infer_output_file for results.txt"""
return infer_output_file(job_context, handler_metadata, "results.txt")
def infer_create_evaluate_matches_plot_reid(job_context, handler_metadata):
"""Calls infer_output_file for sampled_matches.png"""
return infer_output_file(job_context, handler_metadata, "sampled_matches.png")
def infer_create_evaluate_cmc_plot_reid(job_context, handler_metadata):
"""Calls infer_output_file for cmc_curve.png"""
return infer_output_file(job_context, handler_metadata, "cmc_curve.png")
def infer_create_inference_result_file_json(job_context, handler_metadata):
"""Calls infer_output_file for inference.json"""
return infer_output_file(job_context, handler_metadata, "inference.json")
def infer_parent_spec(job_context, handler_metadata):
"""Returns path of the spec file of the parent job"""
handler_id = handler_metadata.get("id")
parent_job_id = job_context.parent_id
network = job_context.network
network_config = read_network_config(network)
api_params = network_config.get("api_params", {})
parent_action = get_handler_job_metadata(handler_metadata.get("id"), job_context.parent_id).get("action")
if handler_metadata.get("automl_enabled") is True and parent_action == "train":
root = get_handler_root(handler_id)
automl_root = os.path.join(root, parent_job_id, "best_model")
spec_file = (glob.glob(f"{automl_root}/*recommendation*.protobuf") + glob.glob(f"{automl_root}/*recommendation*.yaml"))[0]
spec_file_copy = os.path.join(get_handler_spec_root(handler_id), job_context.id + "." + api_params["spec_backend"])
else:
spec_file = os.path.join(get_handler_spec_root(handler_id), parent_job_id + "." + api_params["spec_backend"])
spec_file_copy = spec_file.replace(parent_job_id, job_context.id)
os.makedirs(os.path.dirname(os.path.abspath(spec_file_copy)), exist_ok=True)
shutil.copy(spec_file, spec_file_copy)
return spec_file
def infer_parents_parent_spec(job_context, handler_metadata):
"""Returns path of the spec file of the parent's parent job"""
handler_id = handler_metadata.get("id")
parent_job_id = job_context.parent_id
parents_parent_job_id = get_handler_job_metadata(handler_id, parent_job_id).get("parent_id", "")
parents_parent_action = get_handler_job_metadata(handler_metadata.get("id"), parents_parent_job_id).get("action")
if parents_parent_action == "dataset_convert":
print("Dataset convert spec can't be used for this job, returning parent's spec now", file=sys.stderr)
return infer_parent_spec(job_context, handler_metadata)
try:
uuid.UUID(parents_parent_job_id)
except Exception:
print("Parent's parent job id can't be found, searching for parent's spec now", file=sys.stderr)
return infer_parent_spec(job_context, handler_metadata)
network = job_context.network
network_config = read_network_config(network)
api_params = network_config.get("api_params", {})
if handler_metadata.get("automl_enabled") is True and parents_parent_action == "train":
root = get_handler_root(handler_id)
automl_root = os.path.join(root, parents_parent_job_id, "best_model")
spec_file = (glob.glob(f"{automl_root}/*recommendation*.protobuf") + glob.glob(f"{automl_root}/*recommendation*.yaml"))[0]
spec_file_copy = os.path.join(get_handler_spec_root(handler_id), job_context.id + "." + api_params["spec_backend"])
else:
spec_file = os.path.join(get_handler_spec_root(handler_id), parents_parent_job_id + "." + api_params["spec_backend"])
spec_file_copy = spec_file.replace(parents_parent_job_id, job_context.id)
if not os.path.exists(spec_file):
print("Parent's parent spec can't be found, Searching for parent's spec now", file=sys.stderr)
return infer_parent_spec(job_context, handler_metadata)
os.makedirs(os.path.dirname(os.path.abspath(spec_file_copy)), exist_ok=True)
shutil.copy(spec_file, spec_file_copy)
return spec_file
def infer_parent_spec_copied(job_context, handler_metadata):
"""Returns path of the spec file path copied from the parent job"""
handler_id = handler_metadata.get("id")
parent_job_id = job_context.parent_id
network = job_context.network
network_config = read_network_config(network)
api_params = network_config.get("api_params", {})
parent_action = get_handler_job_metadata(handler_metadata.get("id"), job_context.parent_id).get("action")
if handler_metadata.get("automl_enabled") is True and parent_action == "train":
root = get_handler_root(handler_id)
automl_root = os.path.join(root, parent_job_id, "best_model")
spec_file = (glob.glob(f"{automl_root}/*recommendation*.protobuf") + glob.glob(f"{automl_root}/*recommendation*.yaml"))[0]
spec_file_copy = os.path.join(get_handler_spec_root(handler_id), job_context.id + "." + api_params["spec_backend"])
else:
spec_file = os.path.join(get_handler_spec_root(handler_id), parent_job_id + "." + api_params["spec_backend"])
spec_file_copy = spec_file.replace(parent_job_id, job_context.id)
os.makedirs(os.path.dirname(os.path.abspath(spec_file_copy)), exist_ok=True)
shutil.copy(spec_file, spec_file_copy)
return spec_file_copy
def infer_parent_cal_cache(job_context, handler_metadata):
"""Returns path of the cal.bin of the parent job"""
parent_job_id = job_context.parent_id
cal_file = os.path.join(get_handler_root(handler_metadata.get("id")), parent_job_id, "cal.bin")
if os.path.exists(cal_file):
return cal_file
return None
def infer_lprnet_inference_input(job_context, handler_metadata):
"""Returns path of the inference images for lprnet"""
# Returns root + "/image/" if it exists
infer_ds = handler_metadata.get("inference_dataset", None)
if infer_ds is None:
return None
images_path = get_handler_root(infer_ds) + "/image/"
if os.path.exists(images_path):
return images_path
return None
def infer_classification_val_input(job_context, handler_metadata):
"""Returns path of the inference images for object_detection networks"""
infer_ds = handler_metadata.get("eval_dataset", None)
if infer_ds is None:
return None
images_root = get_handler_root(infer_ds)
if os.path.exists(images_root + "/images/"):
images_path = images_root + "/images/"
return images_path
if os.path.exists(images_root + "/images_val/"):
images_path = images_root + "/images_val/"
return images_path
print(f"Warning: Image directory not found in {images_root}", file=sys.stderr)
return None
# OD helper functions
def infer_od_inference_input(job_context, handler_metadata):
"""Returns path of the inference images for object_detection networks"""
infer_ds = handler_metadata.get("inference_dataset", None)
if infer_ds is None:
return None
images_root = get_handler_root(infer_ds)
if os.path.exists(images_root + "/images/"):
images_path = images_root + "/images/"
return images_path
if os.path.exists(images_root + "/images_test/"):
images_path = images_root + "/images_test/"
return images_path
print(f"Warning: Image directory not found in {images_root}", file=sys.stderr)
return None
def infer_od_inference_labels(job_context, handler_metadata):
"""Returns path of the inference labels for object_detection networks"""
infer_ds = handler_metadata.get("inference_dataset", None)
if infer_ds is None:
return None
images_root = get_handler_root(infer_ds)
if os.path.exists(images_root + "/labels/"):
images_path = images_root + "/labels/"
return images_path
print(f"Warning: Labels directory not found in {images_root}", file=sys.stderr)
return None
def infer_od_inference_label_map(job_context, handler_metadata):
"""Returns path of label_map.txt for object_detection networks"""
infer_ds = handler_metadata.get("inference_dataset", None)
if infer_ds is None:
return None
label_map_path = get_handler_root(infer_ds) + "/label_map.txt"
if os.path.exists(label_map_path):
return label_map_path
if os.path.exists(label_map_path.replace(".txt", ".yaml")):
return label_map_path.replace(".txt", ".yaml")
return None
def infer_od_inference_input_image(job_context, handler_metadata):
"""Returns path of a single inference image for object_detection networks"""
print("Warning: Only single image can be inferred for multitask classification", file=sys.stderr)
images_path = infer_od_inference_input(job_context, handler_metadata)
if images_path:
im_path = glob.glob(images_path + "/*")[0]
return im_path
return None
def infer_od_dir(job_context, handler_metadata, dirname):
"""Returns joined-path of handler_root and dirname"""
handler_root = get_handler_root(handler_metadata.get("id"))
path = f"{handler_root}/{dirname}"
if os.path.exists(path):
return path
return None
def infer_od_images(job_context, handler_metadata):
"""Calls infer_od_dir on images directory"""
return infer_od_dir(job_context, handler_metadata, "images/")
def infer_od_labels(job_context, handler_metadata):
"""Calls infer_od_dir on labels directory"""
return infer_od_dir(job_context, handler_metadata, "labels/")
def infer_unet_val_images(job_context, handler_metadata):
"""Returns path of the images for unet dataset type networks"""
infer_ds = handler_metadata.get("eval_dataset", None)
if infer_ds is None:
return None
images_root = get_handler_root(infer_ds)
if os.path.exists(images_root + "/images/val/"):
images_path = images_root + "/images/val/"
return images_path
print(f"Warning: Labels directory not found in {images_root}", file=sys.stderr)
return None
def infer_unet_val_labels(job_context, handler_metadata):
"""Returns path of the labels for unet dataset type networks"""
infer_ds = handler_metadata.get("eval_dataset", None)
if infer_ds is None:
return None
images_root = get_handler_root(infer_ds)
if os.path.exists(images_root + "/masks/val/"):
images_path = images_root + "/masks/val/"
return images_path
print(f"Warning: Labels directory not found in {images_root}", file=sys.stderr)
return None
def infer_unet_test_images(job_context, handler_metadata):
"""Returns path of the images for unet dataset type networks"""
infer_ds = handler_metadata.get("eval_dataset", None)
if infer_ds is None:
return None
images_root = get_handler_root(infer_ds)
if os.path.exists(images_root + "/images/test/"):
images_path = images_root + "/images/test/"
return images_path
print(f"Warning: Labels directory not found in {images_root}", file=sys.stderr)
return None
def infer_unet_test_labels(job_context, handler_metadata):
"""Returns path of the labels for unet dataset type networks"""
infer_ds = handler_metadata.get("eval_dataset", None)
if infer_ds is None:
return None
images_root = get_handler_root(infer_ds)
if os.path.exists(images_root + "/masks/test/"):
images_path = images_root + "/masks/test/"
return images_path
print(f"Warning: Labels directory not found in {images_root}", file=sys.stderr)
return None
def infer_od_annotations(job_context, handler_metadata):
"""Calls infer_od_dir on annotations.json file"""
return infer_od_dir(job_context, handler_metadata, "annotations.json")
def infer_parent_classmap(job_context, handler_metadata):
"""Returns path of classmap file of parent job"""
parent_job_id = job_context.parent_id
classmap_path = None
# Check if inference dataset has classmap file
infer_ds = handler_metadata.get("inference_dataset", None)
if infer_ds is not None:
classmap_path = os.path.join(get_handler_root(infer_ds), "classmap.json")
if os.path.exists(str(classmap_path)):
return classmap_path
# Else check for classmap presence in the parent job's artifacts
parent_action = get_handler_job_metadata(handler_metadata.get("id"), job_context.parent_id).get("action")
automl_path = ""
if handler_metadata.get("automl_enabled") is True and parent_action == "train":
automl_path = "best_model"
if parent_job_id:
classmap_path = glob.glob(f'{os.path.join(get_handler_root(handler_metadata.get("id")), str(parent_job_id), automl_path)}/**/*classmap.json', recursive=True)
if not classmap_path:
classmap_path = glob.glob(f'{os.path.join(get_handler_root(handler_metadata.get("id")), str(parent_job_id), automl_path)}/**/*class_mapping.json', recursive=True)
if classmap_path and os.path.exists(str(classmap_path[0])):
# Copy parent classmap as current classmap - needed for consecutive jobs which use the parent classmap
os.makedirs(os.path.join(get_handler_root(handler_metadata.get("id")), str(job_context.id)), exist_ok=True)
shutil.copy(classmap_path[0], classmap_path[0].replace(parent_job_id, job_context.id).replace(automl_path, "").replace(parent_action, ""))
return classmap_path[0]
print("Warning: classmap.json needs to be uploaded with inference dataset", file=sys.stderr)
return None
def infer_cal_image_dir(job_context, handler_metadata):
"""Returns path of calibration image directory"""
# Infer calibration image dir
# Idea is to use calibration_dataset's root/images/ directory
# If not present, we simply error out
calib_ds = handler_metadata.get("calibration_dataset", None)
if calib_ds is None:
return None
if job_context.network == "unet":
images_path = get_handler_root(calib_ds) + "/images/train/"
elif job_context.network == "ocdnet":
images_path = get_handler_root(calib_ds) + "/train/img/"
else:
images_path = get_handler_root(calib_ds) + "/images/"
if os.path.exists(images_path):
return images_path
if os.path.exists(images_path.replace("/images/", "/images_train/")):
return images_path.replace("/images/", "/images_train/")
if os.path.exists(images_path.replace("/images/", "/train2017/")):
return images_path.replace("/images/", "/train2017/")
return None
def infer_cal_image_dir_list(job_context, handler_metadata):
"""Returns list of path of calibration images"""
# Infer calibration image dir
# Idea is to use calibration_dataset's root/images/ directory
# If not present, we simply error out
calib_ds = handler_metadata.get("calibration_dataset", None)
if calib_ds is None:
return None
if job_context.network == "ml_recog":
images_path = get_handler_root(calib_ds) + "/metric_learning_recognition/retail-product-checkout-dataset_classification_demo/known_classes/test/"
elif job_context.network == "ocrnet":
images_path = get_handler_root(calib_ds) + "/train/"
else:
images_path = get_handler_root(calib_ds) + "/images/"
if os.path.exists(images_path):
return [images_path]
if os.path.exists(images_path.replace("/images/", "/images_train/")):
return [images_path.replace("/images/", "/images_train/")]
if os.path.exists(images_path.replace("/images/", "/train2017/")):
return [images_path.replace("/images/", "/train2017/")]
return None
def infer_bpnet_coco_spec(job_context, handler_metadata):
"""Returns path of coco_spec file for bpnet"""
train_ds = handler_metadata.get("train_datasets", [])[0]
handler_root = get_handler_root(train_ds)
infer_json = handler_root + "/coco_spec.json"
return infer_json
def infer_bpnet_inference(job_context, handler_metadata):
"""Returns path of inference dataset for bpnet"""
train_ds = handler_metadata.get("train_datasets", [])[0]
handler_root = get_handler_root(train_ds)
infer_path = handler_root + "/val2017"
return infer_path
def infer_data_json(job_context, handler_metadata):
"""Returns path of data json"""
train_ds = handler_metadata.get("train_datasets", [])
if train_ds != []:
handler_root = get_handler_root(train_ds[0])
else:
handler_root = get_handler_root(handler_metadata.get("id"))
return os.path.join(handler_root, "data.json")
def infer_inference_data(job_context, handler_metadata):
"""Returns path of dataset to run sample inference on"""
train_ds = handler_metadata.get("train_datasets", [])[0]
handler_root = get_handler_root(train_ds)
return handler_root
def infer_gt_cache(job_context, handler_metadata):
"""Returns path of label.json for auto_labeling"""
infer_ds = handler_metadata.get("inference_dataset", None)
if infer_ds is None:
return None
gt_cache_path = os.path.join(get_handler_root(infer_ds), "label.json")
if os.path.exists(gt_cache_path):
return gt_cache_path
return None
def infer_label_output(job_context, handler_metadata):
"""Returns path of label.json for auto_labeling"""
results_dir = infer_output_dir(job_context, handler_metadata)
label_output = os.path.join(results_dir, "label.json")
return label_output
CLI_CONFIG_TO_FUNCTIONS = {"output_dir": infer_output_dir,
"key": infer_key,
"experiment_spec": infer_spec_file,
"pruned_model": infer_pruned_model,
"parent_model": infer_parent_model,
"parent_model_evaluate": infer_parent_model_evaluate,
"resume_model": infer_resume_model,
"resume_model_or_ptm": infer_resume_model_or_ptm,
"ptm_if_no_resume_model": infer_ptm_if_no_resume_model,
"automl_assign_ptm": infer_automl_assign_ptm,
"automl_resume_model": infer_automl_resume_model,
"automl_ptm_if_no_resume_model": infer_automl_ptm_if_no_resume_model,
"automl_experiment_spec": infer_automl_experiment_spec,
"automl_output_dir": infer_automl_output_dir,
"automl_assign_resume_epoch": infer_automl_assign_resume_epoch,
"framework": infer_framework_evaluate,
"framework_storetrue": infer_framework_evaluate_storetrue,
"verbose": infer_verbose,
"ptm": infer_ptm,
"create_hdf5_file": infer_output_hdf5,
"create_pth_file": infer_output_pth,
"create_onnx_file": infer_output_onnx,
"create_engine_file": infer_output_trt,
"create_weights_tlt_file": infer_output_weights_tlt,
"create_cal_cache": infer_cal_cache,
"create_cal_data_file": infer_cal_data_file,
"parent_spec": infer_parent_spec,
"parents_parent_spec": infer_parents_parent_spec,
"parent_spec_copied": infer_parent_spec_copied,
"parent_cal_cache": infer_parent_cal_cache,
"merged_json": infer_merged_json,
"create_inference_result_file_pose": infer_create_inference_result_file_pose,
"create_evaluate_matches_plot_reid": infer_create_evaluate_matches_plot_reid,
"create_evaluate_cmc_plot_reid": infer_create_evaluate_cmc_plot_reid,
"create_inference_result_file_reid": infer_create_inference_result_file_json,
"create_inference_result_file_mal": infer_create_inference_result_file_json,
"lprnet_inference_input": infer_lprnet_inference_input,
"classification_val_input": infer_classification_val_input,
"od_inference_input": infer_od_inference_input,
"od_inference_input_image": infer_od_inference_input_image,
"cal_image_dir": infer_cal_image_dir,
"cal_image_dir_list": infer_cal_image_dir_list,
"od_images": infer_od_images,
"od_labels": infer_od_labels,
"od_annotations": infer_od_annotations,
"od_inference_label_map": infer_od_inference_label_map,
"od_inference_labels": infer_od_inference_labels,
"unet_val_images": infer_unet_val_images,
"unet_val_labels": infer_unet_val_labels,
"unet_test_images": infer_unet_test_images,
"unet_test_labels": infer_unet_test_labels,
"create_od_tfrecords": lambda a, b: get_handler_root(b.get("id")) + "/tfrecords/",
"output_dir_images_annotated": lambda a, b: infer_output_dir(a, b) + "/images_annotated/",
"output_dir_labels": lambda a, b: infer_output_dir(a, b) + "/labels/",
"output_dir_inference_json": lambda a, b: infer_output_dir(a, b) + "/annotations_mal.json",
"root": lambda a, b: get_handler_root(b.get("id")), # Just return the root of the handler object
"augment_out": lambda a, b: get_handler_root(b.get("id")) + "/augment",
"from_csv": lambda a, b: None, # Used to infer the param from spec sheet
"parent_classmap": infer_parent_classmap,
"bpnet_coco_spec": infer_bpnet_coco_spec,
"bpnet_infer": infer_bpnet_inference,
"fpenet_data_json": infer_data_json,
"fpenet_inference_data": infer_inference_data,
"label_gt_cache": infer_gt_cache,
"auto_label_output": infer_label_output}
| tao_front_end_services-main | api/handlers/infer_params.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prepare train/val dataset for LPRNet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for LPRNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to OpenALPR's benchmark end2end us license plates."
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to TLT train/eval dataset."
)
return parser.parse_args(args)
def prepare_data(input_dir, img_list, output_dir):
"""Crop the license plates from the orginal images."""
target_img_path = os.path.join(output_dir, "image")
target_label_path = os.path.join(output_dir, "label")
if not os.path.exists(target_img_path):
os.makedirs(target_img_path)
if not os.path.exists(target_label_path):
os.makedirs(target_label_path)
for img_name in img_list:
img_path = os.path.join(input_dir, img_name)
label_path = os.path.join(input_dir,
img_name.split(".")[0] + ".txt")
img = cv2.imread(img_path)
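# Each OpenALPR end2end label file is assumed to hold a single line of the form:
# <image_name> <xmin> <ymin> <width> <height> <plate_text>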
with open(label_path, "r") as f:
label_lines = f.readlines()
assert len(label_lines) == 1
label_items = label_lines[0].split()
assert img_name == label_items[0]
xmin = int(label_items[1])
ymin = int(label_items[2])
width = int(label_items[3])
xmax = xmin + width
height = int(label_items[4])
ymax = ymin + height
lp = label_items[5]
cropped_lp = img[ymin:ymax, xmin:xmax, :]
# save img and label
cv2.imwrite(os.path.join(target_img_path, img_name), cropped_lp)
with open(os.path.join(target_label_path,
img_name.split(".")[0] + ".txt"), "w") as f:
f.write(lp)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
img_files = []
for file_name in os.listdir(args.input_dir):
if file_name.split(".")[-1] == "jpg":
img_files.append(file_name)
total_cnt = len(img_files)
train_cnt = int(total_cnt / 2)
val_cnt = total_cnt - train_cnt
train_img_list = img_files[0:train_cnt]
val_img_list = img_files[train_cnt:]
print("Total {} samples in benchmark dataset".format(total_cnt))
print("{} for train and {} for val".format(train_cnt, val_cnt))
train_dir = os.path.join(args.output_dir, "train")
prepare_data(args.input_dir, train_img_list, train_dir)
val_dir = os.path.join(args.output_dir, "val")
prepare_data(args.input_dir, val_img_list, val_dir)
if __name__ == "__main__":
main()
| tao_front_end_services-main | notebooks/cli/dataset_prepare/lprnet/preprocess_openalpr_benchmark.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FPENet data conversion utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import cv2
import os
import numpy as np
import json
def get_keypoints_from_file(keypoints_file):
'''
This function reads the keypoints file from afw format.
Input:
keypoints_file (str): Path to the keypoints file.
Output:
keypoints (np.array): Keypoints in numpy format [[x, y], [x, y]].
'''
keypoints = []
with open(keypoints_file) as fid:
for line in fid:
if "version" in line or "points" in line or "{" in line or "}" in line:
continue
else:
loc_x, loc_y = line.strip().split(sep=" ")
keypoints.append([float(loc_x), float(loc_y)])
keypoints = np.array(keypoints, dtype=float)
assert keypoints.shape[1] == 2, "Keypoints should be 2D."
return keypoints
def convert_dataset(container_root_path, afw_data_path, output_json_path, afw_image_save_path, key_points=80):
'''
Function to convert afw dataset to Sloth format json.
Input:
container_root_path (str): Root path of the dataset folder as mounted inside the container.
afw_data_path (str): Path to afw data folder.
output_json_path (str): Path to output json file.
afw_image_save_path (str): Image paths to use in json.
key_points (int): Number of keypoints to write out (10 or 80).
Returns:
None
'''
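# Sloth-format output: a list of {"filename", "class": "image", "annotations": [...]}
# entries, where each annotation dict carries FiducialPoints keys P<i>x / P<i>y.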
# get dataset file lists
all_files = os.listdir(afw_data_path)
images = [x for x in all_files if x.endswith('.jpg')]
# keypoint_files = [img_path.split(".")[-2] + ".pts" for img_path in images]
output_folder = os.path.dirname(output_json_path)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# read and convert to sloth format
sloth_data = []
for image in images:
image_path = os.path.join(afw_data_path, image)
image_read = cv2.imread(image_path)
if image_read is None:
print('Bad image:{}'.format(image_path))
continue
# convert image to png
image_png = image.replace('.jpg', '.png')
cv2.imwrite(os.path.join(afw_data_path, image_png), image_read)
image_data = {}
image_data['filename'] = os.path.join(container_root_path, "data/afw", image_png)
image_data['class'] = 'image'
annotations = {}
annotations['tool-version'] = '1.0'
annotations['version'] = 'v1'
annotations['class'] = 'FiducialPoints'
keypoint_file = image.split(".")[-2] + ".pts"
image_keypoints = get_keypoints_from_file(os.path.join(afw_data_path, keypoint_file))
if key_points == 80:
for num, keypoint in enumerate(image_keypoints):
annotations["P{}x".format(num + 1)] = keypoint[0]
annotations["P{}y".format(num + 1)] = keypoint[1]
# fill in dummy keypoints for keypoints 69 to 80
for num in range(69, 81, 1):
annotations["P{}x".format(num)] = image_keypoints[0][0]
annotations["P{}y".format(num)] = image_keypoints[0][1]
annotations["P{}occluded".format(num)] = True
elif key_points == 10:
key_id = 1
for num, keypoint in enumerate(image_keypoints):
# change to 10-points dataset:
if (num + 1) in [1, 9, 17, 20, 25, 39, 45, 34, 49, 55]:
annotations["P{}x".format(key_id)] = keypoint[0]
annotations["P{}y".format(key_id)] = keypoint[1]
key_id += 1
else:
raise ValueError("This script only generates 10 & 80 keypoints dataset.")
image_data['annotations'] = [annotations]
sloth_data.append(image_data)
# save json
with open(output_json_path, "w") as config_file:
json.dump(sloth_data, config_file, indent=4)
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(
description='Transform dataset for FPENet tutorial')
parser.add_argument(
"--afw_data_path",
type=str,
required=True,
help="Input directory to AFW dataset imnages and ground truth keypoints."
)
parser.add_argument(
"--container_root_path",
type=str,
required=True,
help="Path of image folder with respect to the container"
)
parser.add_argument(
"--output_json_path",
type=str,
required=True,
help="Output json file path to save to."
)
parser.add_argument(
"--afw_image_save_path",
type=str,
required=True,
help="Image path to use in jsons."
)
parser.add_argument(
"--num_key_points",
type=int,
default=80,
help="Number of key points."
)
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args()
convert_dataset(args.container_root_path, args.afw_data_path, args.output_json_path, args.afw_image_save_path, args.num_key_points)
| tao_front_end_services-main | notebooks/cli/dataset_prepare/fpenet/data_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import h5py
import cv2
import os
import csv
def build_command_line_parser(parser=None):
"""Build command line parser for dataset_convert.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
prog='process_cohface',
description='Convert COHFACE into heartratenet api compatible dataset',
)
parser.add_argument('-i', '--input_path',
type=str,
required=True,
help='Input path for COHFACE, this is the root of the dataset')
parser.add_argument('-o', '--output_path',
type=str,
required=True,
help='Output path for COHFACE, this is the root of the dataset')
parser.add_argument('-start_subject_id', '--start_subject_id',
type=int,
required=True,
help='Start subject id for COHFACE')
parser.add_argument('-end_subject_id', '--end_subject_id',
type=int,
required=True,
help='End subject id for COHFACE')
parser.add_argument('-b', '--breathing_rate',
action='store_true',
default=False,
help='If true, processes the dataset for breathing rate, else exports heart rate')
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments.
Args:
args (list): List of strings used as command line arguments.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args_parsed = parser.parse_args()
return args_parsed
def get_timestamp_from_video(video_filename):
"""get video timestamp.
Args:
video_filename (str): video filename
Returns:
timestamps(list of float): a list of timestamps for each frame in video
"""
cap = cv2.VideoCapture(video_filename)
# fps = cap.get(cv2.CAP_PROP_FPS)
timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC) / 1000] # convert MSEC to SEC
# calc_timestamps = [0.0]
while (cap.isOpened()):
frame_exists, curr_frame = cap.read()
if frame_exists:
timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000)
else:
break
cap.release()
return timestamps
def process_subject(path, output, breathing=False):
"""convert COHFACE data format for subject.
Args:
path (str): input dataset path
output (str): output dataset path after format conversion
breathing (bool): if True, export the respiration (breathing) signal instead of the pulse (heart rate) signal
Returns:
None
"""
video_file = os.path.join(path, 'data.avi')
vidcap = cv2.VideoCapture(video_file)
fps = vidcap.get(cv2.CAP_PROP_FPS)
timestamps = [vidcap.get(cv2.CAP_PROP_POS_MSEC) / 1000] # convert MSEC to SEC
print(f'Processing {video_file}, fps {fps}')
subject_file = h5py.File(os.path.join(path, 'data.hdf5'), 'r')
# Processing video
count = 0
while vidcap.isOpened():
success, image = vidcap.read()
if success:
cv2.imwrite(os.path.join(output, 'images', format(count, '04d') + '.bmp'), image)
count += 1
timestamps.append(vidcap.get(cv2.CAP_PROP_POS_MSEC) / 1000)
else:
break
vidcap.release()
# Processing image time stamps
image_file = os.path.join(output, 'image_timestamps.csv')
with open(image_file, 'w') as file:
header = ['ID', 'Time']
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for frame, time in zip(range(count), timestamps):
writer.writerow({'ID': frame,
'Time': time})
pulse_time = subject_file['time']
if breathing:
pulse = subject_file['respiration']
else:
pulse = subject_file['pulse']
# Processing pulse
pulse_file = os.path.join(output, 'ground_truth.csv')
with open(pulse_file, 'w') as file:
header = ['Time', 'PulseWaveform']
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for time, pulse_val in zip(pulse_time, pulse):
writer.writerow({'Time': time,
'PulseWaveform': pulse_val})
def main(cl_args=None):
"""process cohface.
Args:
args(list): list of arguments to be parsed if called from another module.
"""
args_parsed = parse_command_line(cl_args)
input_path = args_parsed.input_path
output_path = args_parsed.output_path
start_subject_id = args_parsed.start_subject_id
end_subject_id = args_parsed.end_subject_id
breathing_flag = args_parsed.breathing_rate
session_number = 4
for sub in range(start_subject_id, end_subject_id):
for fol in range(session_number):
input_dir = os.path.join(input_path, str(sub), str(fol))
output_dir = os.path.join(output_path, str(sub), str(fol))
os.makedirs(os.path.join(output_dir, 'images'))
process_subject(input_dir, output_dir, breathing=breathing_flag)
if __name__ == '__main__':
main()
| tao_front_end_services-main | notebooks/cli/dataset_prepare/heartratenet/process_cohface.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Converts Retail Product Checkout (https://www.kaggle.com/datasets/diyer22/retail-product-checkout-dataset) dataset to classification dataset. Ready for MLRecogNet training.
"""
import os
import zipfile
import glob
import cv2
from pycocotools.coco import COCO
from tqdm import tqdm
import numpy as np
import shutil
def create_reference_set(dataset_dir, ref_dir, ref_num = 100):
os.makedirs(ref_dir, exist_ok=True)
classes = os.listdir(dataset_dir)
print(f"Creating reference set from {dataset_dir}...")
for class_name in tqdm(classes):
samples = os.listdir(os.path.join(dataset_dir, class_name))
if not os.path.exists(os.path.join(ref_dir, class_name)):
os.makedirs(os.path.join(ref_dir, class_name))
if len(samples) >= ref_num:
ref_samples = np.random.choice(samples, ref_num, replace=False)
else:
print(f"Warning: {class_name} has only {len(samples)} samples. Copying all samples to reference set.")
ref_samples = samples
for sample in ref_samples:
try:
shutil.copy(os.path.join(dataset_dir, class_name, sample), os.path.join(ref_dir, class_name, sample))
except Exception:
pass
print("Done!")
def crop_images(file_path, bbox, class_id, output_dir):
file_name = os.path.basename(file_path)
class_folder = os.path.join(output_dir, class_id)
if not os.path.exists(class_folder):
os.mkdir(class_folder)
image_count = len(glob.glob( os.path.join(class_folder, file_name+"*.jpg")))
new_file_name = os.path.join(class_folder, file_name + f"_{image_count+1}.jpg")
if os.path.exists(new_file_name):
# skip if file already exists
return
# start processing image
x1, y1, x2, y2 = bbox
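# Note: the COCO-style bbox is [x, y, width, height], so x2/y2 here are the box
# width and height rather than the far corner coordinates.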
# skip if bbox is too small
if x2 < 120 or y2 < 150:
return
try:
image = cv2.imread(file_path)
h, w, _ = image.shape
except:
print(f"{file_path} is not a valid image file")
return
# give 14% margin to the bounding box
cropped_image = image[max(int(y1 - 0.07*y2), 0 ):min(int(y1+1.07*y2), h), \
max(int(x1 - 0.07*x2), 0 ):min(int(x1+1.07*x2), w)]
# resize to 256x256 for faster processing and training
resized_cropped_image = cv2.resize(cropped_image, (256, 256), interpolation=cv2.INTER_AREA)
cv2.imwrite(new_file_name, resized_cropped_image)
# load dataset
data_root_dir = os.path.join(os.environ['DATA_DIR'],"metric_learning_recognition")
path_to_zip_file = os.path.join(data_root_dir,"retail-product-checkout-dataset.zip")
directory_to_extract_to = os.path.join(data_root_dir, "retail-product-checkout-dataset")
processed_classification_dir = os.path.join(data_root_dir,"retail-product-checkout-dataset_classification_demo")
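# Assumes the Kaggle archive has already been downloaded to
# $DATA_DIR/metric_learning_recognition/retail-product-checkout-dataset.zip before running.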
## unzip dataset
if not os.path.exists(processed_classification_dir):
os.makedirs(processed_classification_dir)
print("Unzipping dataset...")
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(directory_to_extract_to)
directory_to_extract_to = os.path.join(directory_to_extract_to, "retail_product_checkout")
for dataset in ["train", "val", "test"]:
dataset_dir = os.path.join(directory_to_extract_to, dataset+"2019")
annotation_file = os.path.join(directory_to_extract_to, "instances_"+dataset+"2019.json")
output_dir = os.path.join(processed_classification_dir, dataset)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
## load coco dataset
print(f"Loading COCO {dataset} dataset...")
coco_label = COCO(annotation_file)
# crop images to classification data
for img_object in tqdm(coco_label.dataset["images"]):
image_path = os.path.join(dataset_dir, img_object["file_name"])
# remove top view images
if "camera2" in image_path:
continue
image_id = img_object["id"]
annotation_ids = coco_label.getAnnIds(imgIds=image_id)
for annot in coco_label.loadAnns(annotation_ids):
bbox = annot["bbox"]
class_id = annot["category_id"]
category = coco_label.loadCats(class_id)[0]
class_name = category["supercategory"] + "_" + category["name"]
crop_images(image_path, bbox, class_name, output_dir)
# extract a reference set from training set
## fixed random seed for reproducibility
np.random.seed(0)
create_reference_set(
os.path.join(processed_classification_dir, "train"), \
os.path.join(processed_classification_dir, "reference"), \
ref_num=100)
# split out unknown classes
# select 20% classes as unknown classes
class_list = os.listdir(os.path.join(processed_classification_dir, "train"))
total_class_num = len(class_list)
unknown_classes = np.random.choice(class_list, int(total_class_num*0.2), replace=False)
known_classes = [c for c in class_list if c not in unknown_classes]
known_classes_dir = os.path.join(processed_classification_dir, "known_classes")
unknown_classes_dir = os.path.join(processed_classification_dir, "unknown_classes")
for dataset in ["train", "val", "test", "reference"]:
known_classes_dataset_dir = os.path.join(known_classes_dir, dataset)
unknown_classes_dataset_dir = os.path.join(unknown_classes_dir, dataset)
if not os.path.exists(known_classes_dataset_dir):
os.makedirs(known_classes_dataset_dir)
if not os.path.exists(unknown_classes_dataset_dir):
os.makedirs(unknown_classes_dataset_dir)
for class_name in tqdm(known_classes):
class_dir = os.path.join(processed_classification_dir, dataset, class_name)
os.rename(class_dir, os.path.join(known_classes_dataset_dir, class_name))
for class_name in tqdm(unknown_classes):
class_dir = os.path.join(processed_classification_dir, dataset, class_name)
os.rename(class_dir, os.path.join(unknown_classes_dataset_dir, class_name))
# remove old folders
for dataset in ["train", "val", "test", "reference"]:
shutil.rmtree(os.path.join(processed_classification_dir, dataset))
| tao_front_end_services-main | notebooks/cli/dataset_prepare/metric_learning_recognition/process_retail_product_checkout_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import cv2
import csv
import ujson
classes = set([])
def read_kitti(prefix, label_file):
"Function wrapper to read kitti format labels txt file."
global classes
full_label_path = os.path.join(prefix, label_file)
if not full_label_path.endswith(".txt"):
return
if not os.path.exists(full_label_path):
raise ValueError("Labelfile : {} does not exist".format(full_label_path))
if os.path.isdir(full_label_path):
return
dict_list = []
image_name = full_label_path.replace("/labels", "/images").replace(".txt", ".jpg")
if not os.path.exists(image_name):
raise ValueError("Image : {} does not exist".format(image_name))
img = cv2.imread(image_name, 0)
height, width = img.shape[:2]
with open(full_label_path, 'r') as lf:
for row in csv.reader(lf, delimiter=' '):
classes.add(row[0])
dict_list.append({"class_name": row[0],
"file_name": label_file.replace(".txt", ".jpg"),
"height": height,
"width": width,
"bbox": [float(row[4]), float(row[5]), float(row[6]) - float(row[4]), float(row[7]) - float(row[5])]})
if (dict_list == []):
dict_list = [{"file_name": label_file.replace(".txt", ".jpg"),
"height": height,
"width": width}]
return dict_list
def construct_coco_json(labels_folder):
image_id = 0
annot_ctr = 0
labels = []
for file in os.listdir(labels_folder):
label = read_kitti(labels_folder, file)
labels.append(label)
categories = []
class_to_id_mapping = {}
for idx, object_class in enumerate(classes):
class_to_id_mapping[object_class] = idx + 1
categories.append({"supercategory": object_class, "id": idx + 1, "name": object_class})
coco_json = {"images": [], "annotations": [], "categories": categories}
for label in labels:
if not (label and len(label)):
continue
coco_json["images"].append({"file_name": label[0]["file_name"], "height": label[0]["height"], "width": label[0]["width"], "id": image_id})
for instance in label:
if ("bbox" in instance.keys()):
coco_json["annotations"].append({"bbox": instance["bbox"],
"image_id": image_id,
"id": annot_ctr,
"category_id": class_to_id_mapping[instance["class_name"]],
"bbox_mode": 1,
"segmentation": [],
"iscrowd": 0,
"area": float(instance["bbox"][2] * instance["bbox"][3])})
annot_ctr += 1
image_id += 1
return coco_json
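# Usage (positional args): python kitti_to_coco.py <kitti_label_dir> <output_dir> <label_map_extension: txt|yaml>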
label_folder = sys.argv[1]
coco_json = construct_coco_json(label_folder)
current_str = ujson.dumps(coco_json, indent=4)
with open(sys.argv[2] + "/annotations.json", "w") as json_out_file:
json_out_file.write(current_str)
label_map_extension = sys.argv[3]
with open(f"{sys.argv[2]}/label_map.{label_map_extension}", "w") as label_map_file:
for idx, class_name in enumerate(classes):
if label_map_extension == "yaml":
label_map_file.write(f"{idx+1}: '{class_name}'\n")
else:
label_map_file.write(f"{class_name}\n")
label_map_file.flush()
print(len(classes))
| tao_front_end_services-main | notebooks/cli/dataset_prepare/kitti/kitti_to_coco.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calibration of KITTI dataset."""
import numpy as np
def get_calib_from_file(calib_file):
"""Get calibration from file."""
with open(calib_file) as f:
lines = f.readlines()
obj = lines[2].strip().split(' ')[1:]
P2 = np.array(obj, dtype=np.float32)
obj = lines[3].strip().split(' ')[1:]
P3 = np.array(obj, dtype=np.float32)
obj = lines[4].strip().split(' ')[1:]
R0 = np.array(obj, dtype=np.float32)
obj = lines[5].strip().split(' ')[1:]
Tr_velo_to_cam = np.array(obj, dtype=np.float32)
return {'P2': P2.reshape(3, 4),
'P3': P3.reshape(3, 4),
'R0': R0.reshape(3, 3),
'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
class Calibration(object):
"""Calibration class."""
def __init__(self, calib_file):
"""Initialize."""
if not isinstance(calib_file, dict):
calib = get_calib_from_file(calib_file)
else:
calib = calib_file
self.P2 = calib['P2'] # 3 x 4
self.R0 = calib['R0'] # 3 x 3
self.V2C = calib['Tr_velo2cam'] # 3 x 4
# Camera intrinsics and extrinsics
self.cu = self.P2[0, 2]
self.cv = self.P2[1, 2]
self.fu = self.P2[0, 0]
self.fv = self.P2[1, 1]
self.tx = self.P2[0, 3] / (-self.fu)
self.ty = self.P2[1, 3] / (-self.fv)
def cart_to_hom(self, pts):
"""
:param pts: (N, 3 or 2)
:return pts_hom: (N, 4 or 3)
"""
pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
return pts_hom
def rect_to_lidar(self, pts_rect):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_rect_hom = self.cart_to_hom(pts_rect) # (N, 4)
R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)
R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
R0_ext[3, 3] = 1
V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
V2C_ext[3, 3] = 1
pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
return pts_lidar[:, 0:3]
def lidar_to_rect(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_lidar_hom = self.cart_to_hom(pts_lidar)
pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
# pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
return pts_rect
def rect_to_img(self, pts_rect):
"""
:param pts_rect: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect_hom = self.cart_to_hom(pts_rect)
pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)
pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord
return pts_img, pts_rect_depth
def lidar_to_img(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect = self.lidar_to_rect(pts_lidar)
pts_img, pts_depth = self.rect_to_img(pts_rect)
return pts_img, pts_depth
def img_to_rect(self, u, v, depth_rect):
"""
:param u: (N)
:param v: (N)
:param depth_rect: (N)
:return:
"""
x = ((u - self.cu) * depth_rect) / self.fu + self.tx
y = ((v - self.cv) * depth_rect) / self.fv + self.ty
pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
return pts_rect
def corners3d_to_img_boxes(self, corners3d):
"""
:param corners3d: (N, 8, 3) corners in rect coordinate
:return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
:return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
"""
sample_num = corners3d.shape[0]
corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 4)
img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)
x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
return boxes, boxes_corner
| tao_front_end_services-main | notebooks/cli/dataset_prepare/pointpillars/calibration_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
def drop_class(label_dir, classes):
"""drop label by class names."""
labels = os.listdir(label_dir)
labels = [os.path.join(label_dir, x) for x in labels]
for gt in labels:
print("Processing ", gt)
with open(gt) as f:
lines = f.readlines()
lines_ret = []
for line in lines:
ls = line.strip()
line = ls.split()
if line[0] in classes:
print("Dropping ", line[0])
continue
else:
lines_ret.append(ls)
with open(gt, "w") as fo:
out = '\n'.join(lines_ret)
fo.write(out)
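# Usage: python drop_class.py <kitti_label_dir> <comma-separated class names to drop>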
if __name__ == "__main__":
drop_class(sys.argv[1], sys.argv[2].split(','))
| tao_front_end_services-main | notebooks/cli/dataset_prepare/pointpillars/drop_class.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import numpy as np
from object3d_kitti import get_objects_from_label
from calibration_kitti import Calibration
def parse_args():
parser = argparse.ArgumentParser("Convert camera label to LiDAR label.")
parser.add_argument(
"-l", "--label_dir",
type=str, required=True,
help="Camera label directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR label directory"
)
return parser.parse_args()
def generate_lidar_labels(label_dir, calib_dir, output_dir):
"""Generate LiDAR labels from KITTI Camera labels."""
for lab in os.listdir(label_dir):
lab_file = os.path.join(label_dir, lab)
obj_list = get_objects_from_label(lab_file)
calib_file = os.path.join(calib_dir, lab)
calib = Calibration(calib_file)
loc = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
loc_lidar = calib.rect_to_lidar(loc)
# update obj3d.loc
with open(os.path.join(output_dir, lab), "w") as lf:
for idx, lc in enumerate(loc_lidar):
# bottom center to 3D center
obj_list[idx].loc = (lc + np.array([0., 0., obj_list[idx].h / 2.]))
# rotation_y to rotation_z
obj_list[idx].ry = -np.pi / 2. - obj_list[idx].ry
lf.write(obj_list[idx].to_kitti_format())
lf.write('\n')
if __name__ == "__main__":
args = parse_args()
generate_lidar_labels(args.label_dir, args.calib_dir, args.output_dir)
| tao_front_end_services-main | notebooks/cli/dataset_prepare/pointpillars/gen_lidar_labels.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
def split(list_file, lidar, label, output_lidar, output_label):
"""train/val split of the KITTI dataset."""
with open(list_file) as lf:
file_names = lf.readlines()
file_names = [f.strip() for f in file_names]
for li in os.listdir(lidar):
if li[:-4] in file_names:
os.rename(os.path.join(lidar, li), os.path.join(output_lidar, li))
for la in os.listdir(label):
if la[:-4] in file_names:
os.rename(os.path.join(label, la), os.path.join(output_label, la))
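# Usage: python kitti_split.py <split_list_file> <lidar_dir> <label_dir> <output_lidar_dir> <output_label_dir>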
if __name__ == "__main__":
split(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
| tao_front_end_services-main | notebooks/cli/dataset_prepare/pointpillars/kitti_split.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def get_objects_from_label(label_file):
"""Get objects from label."""
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [Object3d(line) for line in lines]
return objects
def cls_type_to_id(cls_type):
"""Convert class type to ID."""
type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
if cls_type not in type_to_id.keys():
return -1
return type_to_id[cls_type]
class Object3d(object):
"""Object3d class."""
def __init__(self, line):
"""Initialize."""
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.cls_id = cls_type_to_id(self.cls_type)
self.truncation = float(label[1])
self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[8])
self.w = float(label[9])
self.l = float(label[10]) # noqa: E741
self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
self.dis_to_cam = np.linalg.norm(self.loc)
self.ry = float(label[14])
self.score = float(label[15]) if label.__len__() == 16 else -1.0
self.level_str = None
self.level = self.get_kitti_obj_level()
def get_kitti_obj_level(self):
"""Get KITTI object difficult level."""
height = float(self.box2d[3]) - float(self.box2d[1]) + 1
if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
self.level_str = 'Easy'
return 0 # Easy
if height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
self.level_str = 'Moderate'
return 1 # Moderate
if height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
self.level_str = 'Hard'
return 2 # Hard
self.level_str = 'UnKnown'
return -1
def generate_corners3d(self):
"""
generate corners3d representation for this object
:return corners_3d: (8, 3) corners of box3d in camera coord
"""
l, h, w = self.l, self.h, self.w
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
[0, 1, 0],
[-np.sin(self.ry), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.loc
return corners3d
def to_str(self):
"""Convert to string."""
print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
% (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l, self.loc, self.ry)
return print_str
def to_kitti_format(self):
"""Convert to KITTI format."""
kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
% (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2], self.ry)
return kitti_str
| tao_front_end_services-main | notebooks/cli/dataset_prepare/pointpillars/object3d_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import numpy as np
from skimage import io
from calibration_kitti import Calibration
def parse_args():
parser = argparse.ArgumentParser("Limit LIDAR points to FOV range.")
parser.add_argument(
"-p", "--points_dir",
type=str, required=True,
help="LIDAR points directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR points directory"
)
parser.add_argument(
"-i",
"--image_dir",
type=str, required=True,
help="image directory"
)
return parser.parse_args()
def get_fov_flag(pts_rect, img_shape, calib):
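"""Return a boolean mask selecting points whose image projection lies inside the image and has non-negative depth.
:param pts_rect: (N, 3) points in rectified camera coordinates
:param img_shape: (height, width) of the corresponding image
:param calib: Calibration object used to project points onto the image plane
"""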
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def generate_lidar_points(points_dir, calib_dir, output_dir, image_dir):
"""Limit LiDAR points to FOV range."""
for pts in os.listdir(points_dir):
pts_file = os.path.join(points_dir, pts)
points = np.fromfile(pts_file, dtype=np.float32).reshape(-1, 4)
calib_file = os.path.join(calib_dir, pts[:-4] + ".txt")
calib = Calibration(calib_file)
pts_rect = calib.lidar_to_rect(points[:, 0:3])
img_file = os.path.join(image_dir, pts[:-4] + ".png")
img_shape = np.array(io.imread(img_file).shape[:2], dtype=np.int32)
fov_flag = get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
points.tofile(os.path.join(output_dir, pts))
# double check
points_cp = np.fromfile(os.path.join(output_dir, pts), dtype=np.float32).reshape(-1, 4)
assert np.equal(points, points_cp).all()
if __name__ == "__main__":
args = parse_args()
generate_lidar_points(
args.points_dir, args.calib_dir,
args.output_dir, args.image_dir
)
| tao_front_end_services-main | notebooks/cli/dataset_prepare/pointpillars/gen_lidar_points.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prepare train/val dataset for Unet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
from PIL import Image, ImageSequence
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for UNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to ISBI Tiff Files"
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to processes images from ISBI Tiff files."
)
return parser.parse_args(args)
def save_arrays_as_images(arr, im_dir):
"""Utility function to save the images to dir from arrays."""
for idx, img_arr in enumerate(arr):
img_name = os.path.join(im_dir, "image_{}.png".format(idx))
cv2.imwrite(img_name, img_arr)
def load_multipage_tiff(path):
"""Load tiff images containing many images in the channel dimension"""
return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])
def check_and_create(d):
"""Utility function to create a dir if not present"""
if not os.path.isdir(d):
os.makedirs(d)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
train_images_tif = os.path.join(args.input_dir, "train-volume.tif")
train_masks_tif = os.path.join(args.input_dir, "train-labels.tif")
test_images_tif = os.path.join(args.input_dir, "test-volume.tif")
output_images_dir = os.path.join(args.output_dir, "images")
output_masks_dir = os.path.join(args.output_dir, "masks")
# Creating the images dir for train, test, val
train_images_dir = os.path.join(output_images_dir, "train")
val_images_dir = os.path.join(output_images_dir, "val")
test_images_dir = os.path.join(output_images_dir, "test")
train_masks_dir = os.path.join(output_masks_dir, "train")
val_masks_dir = os.path.join(output_masks_dir, "val")
check_and_create(train_images_dir)
check_and_create(val_images_dir)
check_and_create(test_images_dir)
check_and_create(train_masks_dir)
check_and_create(val_masks_dir)
train_np_arrays_images = load_multipage_tiff(train_images_tif)
train_np_arrays_masks = load_multipage_tiff(train_masks_tif)
test_np_arrays_images = load_multipage_tiff(test_images_tif)
# Splitting the train numpy arrays into train and val
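# The first 20 slices are used for training and the remaining slices for validation.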
train_np_arrays_images_final = train_np_arrays_images[:20, :, :]
train_np_arrays_masks_final = train_np_arrays_masks[:20, :, :]
val_np_arrays_images_final = train_np_arrays_images[20:, :, :]
val_np_arrays_masks_final = train_np_arrays_masks[20:, :, :]
# Saving the train arrays as images
save_arrays_as_images(train_np_arrays_images_final, train_images_dir)
save_arrays_as_images(train_np_arrays_masks_final, train_masks_dir)
# Saving the val arrays as images
save_arrays_as_images(val_np_arrays_images_final, val_images_dir)
save_arrays_as_images(val_np_arrays_masks_final, val_masks_dir)
# Saving the test arrays as images
save_arrays_as_images(test_np_arrays_images, test_images_dir)
print("Prepared data successfully !")
if __name__ == "__main__":
main()
| tao_front_end_services-main | notebooks/cli/dataset_prepare/unet/prepare_data_isbi.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import re
import random
import sys
def sample_dataset(input_dir, output_dir, n_samples, use_ids=None):
"""Select a subset of images fom input_dir and move them to output_dir.
Args:
input_dir (str): Input Folder Path of the train images.
output_dir (str): Output Folder Path of the test images.
n_samples (int): Number of samples to use.
use_ids(list int): List of IDs to grab from test and query folder.
Returns:
IDs used for sampling
"""
img_paths = glob.glob(os.path.join(input_dir, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
id_to_img = {}
# Grab images with matching ids
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid not in id_to_img:
id_to_img[pid] = []
id_to_img[pid].append(img_path)
# Create directory
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
command = "rm -r " + output_dir
os.system(command)
os.makedirs(output_dir)
assert id_to_img, "Dataset size cannot be 0."
sampled_id_to_img = dict(random.sample(list(id_to_img.items()), n_samples))
for key, img_paths in sampled_id_to_img.items():
for img_path in img_paths:
command = "cp " + img_path + " " + output_dir
os.system(command)
# Use same ids for test and query
if use_ids:
# Create query dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
command = "rm -r " + output_dir
os.system(command)
os.makedirs(output_dir)
# Find images in test with same id
img_paths = glob.glob(os.path.join(input_dir, '*.jpg'))
for id in use_ids:
pattern = re.compile(r'([-\d]+)_c(\d)')
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if id == pid:
print(img_path)
command = "cp " + img_path + " " + output_dir
os.system(command)
return sampled_id_to_img.keys()
# Number of samples
n_samples = int(sys.argv[1])
data_dir = os.path.join(os.environ["DATA_DIR"], "market1501")
# Create train dataset
train_input_dir = os.path.join(data_dir, "bounding_box_train")
train_output_dir = os.path.join(data_dir, "sample_train")
sample_dataset(train_input_dir, train_output_dir, n_samples)
# Create test dataset
test_input_dir = os.path.join(data_dir, "bounding_box_test")
test_output_dir = os.path.join(data_dir, "sample_test")
ids = sample_dataset(test_input_dir, test_output_dir, n_samples)
# Create query dataset
query_input_dir = os.path.join(data_dir, "query")
query_output_dir = os.path.join(data_dir, "sample_query")
sample_dataset(query_input_dir, query_output_dir, n_samples, ids)
| tao_front_end_services-main | notebooks/cli/dataset_prepare/re_identification/obtain_subset_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import numpy as np
data_dir = os.path.join(os.environ["DATA_DIR"], "kinetics")
# front_raises: 134
# pull_ups: 255
# clean_and_jerk: 59
# presenting_weather_forecast: 254
# deadlifting: 88
selected_actions = {
134: 0,
255: 1,
59: 2,
254: 3,
88: 4
}
def select_actions(selected_actions, data_dir, split_name):
"""Select a subset of actions and their corresponding labels.
Args:
selected_actions (dict): Map from selected class IDs to new class IDs.
data_dir (str): Path to the directory of data arrays (.npy) and labels (.pkl).
split_name (str): Name of the split to be processed, e.g., "train" and "val".
Returns:
No explicit returns
"""
data_path = os.path.join(data_dir, f"{split_name}_data.npy")
label_path = os.path.join(data_dir, f"{split_name}_label.pkl")
data_array = np.load(file=data_path)
with open(label_path, "rb") as label_file:
labels = pickle.load(label_file)
assert (len(labels) == 2)
assert (data_array.shape[0] == len(labels[0]))
assert (len(labels[0]) == len(labels[1]))
print(f"No. total samples for {split_name}: {data_array.shape[0]}")
selected_indices = []
for i in range(data_array.shape[0]):
if labels[1][i] in selected_actions.keys():
selected_indices.append(i)
data_array = data_array[selected_indices, :, :, :, :]
selected_sample_names = [labels[0][x] for x in selected_indices]
selected_labels = [selected_actions[labels[1][x]] for x in selected_indices]
labels = (selected_sample_names, selected_labels)
print(f"No. selected samples for {split_name}: {data_array.shape[0]}")
np.save(file=data_path, arr=data_array, allow_pickle=False)
with open(label_path, "wb") as label_file:
pickle.dump(labels, label_file, protocol=4)
select_actions(selected_actions, data_dir, "train")
select_actions(selected_actions, data_dir, "val")
| tao_front_end_services-main | notebooks/cli/dataset_prepare/pose_classification/select_subset_actions.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Clean the labels to be alphanumeric and case-insensitive (lower case). Filter out labels longer than 25 characters.
import os
import re
import sys
from tqdm import tqdm
def preprocess_label(gt_file, filtered_file):
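"""Filter a ground-truth label file down to clean alphanumeric labels.
Each input line is expected to hold an image path followed by its label, separated by whitespace;
the path's trailing character (e.g. a comma) is stripped and surrounding quotes are removed from the label.
Labels containing non-alphanumeric characters or longer than 25 characters are dropped, and the
remaining entries are written to filtered_file as tab-separated "path<TAB>label" lines.
"""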
gt_list = open(gt_file, "r").readlines()
filtered_list = []
character_list = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
for label_line in tqdm(gt_list):
try:
path, label = label_line.strip().split()
except Exception:
continue
path = path[:-1]
label = label.strip("\"")
if re.search(f"[^{character_list}]", label):
continue
else:
if len(label) <= 25:
label = label.lower() # ignore the case
filtered_list.append(f"{path}\t{label}\n")
with open(filtered_file, "w") as f:
f.writelines(filtered_list)
def main():
preprocess_label(sys.argv[1], sys.argv[2])
character_list = "0123456789abcdefghijklmnopqrstuvwxyz"
with open(os.path.join(os.getenv("DATA_DIR"), "character_list"), "w") as f:
for ch in character_list:
f.write(f"{ch}\n")
if __name__ == "__main__":
main() | tao_front_end_services-main | notebooks/cli/dataset_prepare/ocrnet/preprocess_label.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prepare train/val dataset for LPRNet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for LPRNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to OpenALPR's benchmark end2end us license plates."
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to TLT train/eval dataset."
)
return parser.parse_args(args)
def prepare_data(input_dir, img_list, output_dir):
"""Crop the license plates from the orginal images."""
target_img_path = os.path.join(output_dir, "image")
target_label_path = os.path.join(output_dir, "label")
if not os.path.exists(target_img_path):
os.makedirs(target_img_path)
if not os.path.exists(target_label_path):
os.makedirs(target_label_path)
for img_name in img_list:
img_path = os.path.join(input_dir, img_name)
label_path = os.path.join(input_dir,
img_name.split(".")[0] + ".txt")
img = cv2.imread(img_path)
with open(label_path, "r") as f:
label_lines = f.readlines()
assert len(label_lines) == 1
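# Each label file holds a single line: "<image_name> <xmin> <ymin> <width> <height> <plate_text>"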
label_items = label_lines[0].split()
assert img_name == label_items[0]
xmin = int(label_items[1])
ymin = int(label_items[2])
width = int(label_items[3])
xmax = xmin + width
height = int(label_items[4])
ymax = ymin + height
lp = label_items[5]
cropped_lp = img[ymin:ymax, xmin:xmax, :]
# save img and label
cv2.imwrite(os.path.join(target_img_path, img_name), cropped_lp)
with open(os.path.join(target_label_path,
img_name.split(".")[0] + ".txt"), "w") as f:
f.write(lp)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
img_files = []
for file_name in os.listdir(args.input_dir):
if file_name.split(".")[-1] == "jpg":
img_files.append(file_name)
total_cnt = len(img_files)
train_cnt = int(total_cnt / 2)
val_cnt = total_cnt - train_cnt
train_img_list = img_files[0:train_cnt]
val_img_list = img_files[train_cnt:]  # use all remaining images for val so the count matches val_cnt
print("Total {} samples in benchmark dataset".format(total_cnt))
print("{} for train and {} for val".format(train_cnt, val_cnt))
train_dir = os.path.join(args.output_dir, "train")
prepare_data(args.input_dir, train_img_list, train_dir)
val_dir = os.path.join(args.output_dir, "val")
prepare_data(args.input_dir, val_img_list, val_dir)
if __name__ == "__main__":
main()
| tao_front_end_services-main | notebooks/api/dataset_prepare/lprnet/preprocess_openalpr_benchmark.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FPENet data conversion utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import cv2
import os
import numpy as np
import json
def get_keypoints_from_file(keypoints_file):
'''
This function reads the keypoints file from afw format.
Input:
keypoints_file (str): Path to the keypoints file.
Output:
keypoints (np.array): Keypoints in numpy format [[x, y], [x, y]].
'''
keypoints = []
with open(keypoints_file) as fid:
for line in fid:
if "version" in line or "points" in line or "{" in line or "}" in line:
continue
else:
loc_x, loc_y = line.strip().split(sep=" ")
keypoints.append([float(loc_x), float(loc_y)])
keypoints = np.array(keypoints, dtype=float)
assert keypoints.shape[1] == 2, "Keypoints should be 2D."
return keypoints
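# The parser above assumes the standard .pts landmark file layout, for example:
# version: 1
# n_points: 68
# {
# 337.5 471.1
# ...
# }
# Only the "x y" coordinate lines are kept; header and brace lines are skipped.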
def convert_dataset(container_root_path, afw_data_path, output_json_path, afw_image_save_path, key_points=80):
'''
Function to convert afw dataset to Sloth format json.
Input:
afw_data_path (str): Path to afw data folder.
output_json_path (str): Path to output json file.
afw_image_save_path (str): Image paths to use in json.
Returns:
None
'''
# get dataset file lists
all_files = os.listdir(afw_data_path)
images = [x for x in all_files if x.endswith('.jpg')]
# keypoint_files = [img_path.split(".")[-2] + ".pts" for img_path in images]
output_folder = os.path.dirname(output_json_path)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# read and convert to sloth format
sloth_data = []
for image in images:
image_path = os.path.join(afw_data_path, image)
image_read = cv2.imread(image_path)
if image_read is None:
print('Bad image:{}'.format(image_path))
continue
# convert image to png
image_png = image.replace('.jpg', '.png')
cv2.imwrite(os.path.join(afw_data_path, image_png), image_read)
image_data = {}
image_data['filename'] = os.path.join(container_root_path, "data/afw", image_png)
image_data['class'] = 'image'
annotations = {}
annotations['tool-version'] = '1.0'
annotations['version'] = 'v1'
annotations['class'] = 'FiducialPoints'
keypoint_file = image.split(".")[-2] + ".pts"
image_keypoints = get_keypoints_from_file(os.path.join(afw_data_path, keypoint_file))
if key_points == 80:
for num, keypoint in enumerate(image_keypoints):
annotations["P{}x".format(num + 1)] = keypoint[0]
annotations["P{}y".format(num + 1)] = keypoint[1]
# fill in dummy keypoints for keypoints 69 to 80
for num in range(69, 81, 1):
annotations["P{}x".format(num)] = image_keypoints[0][0]
annotations["P{}y".format(num)] = image_keypoints[0][1]
annotations["P{}occluded".format(num)] = True
elif key_points == 10:
key_id = 1
for num, keypoint in enumerate(image_keypoints):
# change to 10-points dataset:
if (num + 1) in [1, 9, 17, 20, 25, 39, 45, 34, 49, 55]:
annotations["P{}x".format(key_id)] = keypoint[0]
annotations["P{}y".format(key_id)] = keypoint[1]
key_id += 1
else:
raise ValueError("This script only generates 10 & 80 keypoints dataset.")
image_data['annotations'] = [annotations]
sloth_data.append(image_data)
# save json
with open(output_json_path, "w") as config_file:
json.dump(sloth_data, config_file, indent=4)
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(
description='Transform dataset for FPENet tutorial')
parser.add_argument(
"--afw_data_path",
type=str,
required=True,
help="Input directory to AFW dataset imnages and ground truth keypoints."
)
parser.add_argument(
"--container_root_path",
type=str,
required=True,
help="Path of image folder with respect to the container"
)
parser.add_argument(
"--output_json_path",
type=str,
required=True,
help="Output json file path to save to."
)
parser.add_argument(
"--afw_image_save_path",
type=str,
required=True,
help="Image path to use in jsons."
)
parser.add_argument(
"--num_key_points",
type=int,
default=80,
help="Number of key points."
)
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args()
convert_dataset(args.container_root_path, args.afw_data_path, args.output_json_path, args.afw_image_save_path, args.num_key_points)
| tao_front_end_services-main | notebooks/api/dataset_prepare/fpenet/data_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Converts Retail Product Checkout (https://www.kaggle.com/datasets/diyer22/retail-product-checkout-dataset) dataset to classification dataset. Ready for MLRecogNet training.
"""
import os
import zipfile
import glob
import cv2
from pycocotools.coco import COCO
from tqdm import tqdm
import numpy as np
import shutil
def create_reference_set(dataset_dir, ref_dir, ref_num = 100):
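"""Randomly sample up to `ref_num` images per class from `dataset_dir` and copy them into `ref_dir`, preserving the per-class folder layout."""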
os.makedirs(ref_dir, exist_ok=True)
classes = os.listdir(dataset_dir)
print(f"Creating reference set from {dataset_dir}...")
for class_name in tqdm(classes):
samples = os.listdir(os.path.join(dataset_dir, class_name))
if not os.path.exists(os.path.join(ref_dir, class_name)):
os.makedirs(os.path.join(ref_dir, class_name))
if len(samples) >= ref_num:
ref_samples = np.random.choice(samples, ref_num, replace=False)
else:
print(f"Warning: {class_name} has only {len(samples)} samples. Copying all samples to reference set.")
ref_samples = samples
for sample in ref_samples:
try:
shutil.copy(os.path.join(dataset_dir, class_name, sample), os.path.join(ref_dir, class_name, sample))
except Exception:
pass  # ignore samples that fail to copy
print("Done!")
def crop_images(file_path, bbox, class_id, output_dir):
file_name = os.path.basename(file_path)
class_folder = os.path.join(output_dir, class_id)
if not os.path.exists(class_folder):
os.mkdir(class_folder)
image_count = len(glob.glob( os.path.join(class_folder, file_name+"*.jpg")))
new_file_name = os.path.join(class_folder, file_name + f"_{image_count+1}.jpg")
if os.path.exists(new_file_name):
# skip if file already exists
return
# start processing image
x1, y1, x2, y2 = bbox  # COCO-style bbox: [x, y, width, height], so x2/y2 hold the box width/height
# skip if bbox is too small
if x2 < 120 or y2 < 150:
return
try:
image = cv2.imread(file_path)
h, w, _ = image.shape
except Exception:
print(f"{file_path} is not a valid image file")
return
# give 14% margin to the bounding box
cropped_image = image[max(int(y1 - 0.07*y2), 0 ):min(int(y1+1.07*y2), h), \
max(int(x1 - 0.07*x2), 0 ):min(int(x1+1.07*x2), w)]
# resize to 256x256 for faster processing and training
resized_cropped_image = cv2.resize(cropped_image, (256, 256), cv2.INTER_AREA)
cv2.imwrite(os.path.join(class_folder, new_file_name), resized_cropped_image)
# load dataset
data_root_dir = os.path.join(os.environ['DATA_DIR'],"metric_learning_recognition")
path_to_zip_file = os.path.join(data_root_dir,"retail-product-checkout-dataset.zip")
directory_to_extract_to = os.path.join(data_root_dir, "retail-product-checkout-dataset")
processed_classification_dir = os.path.join(data_root_dir,"retail-product-checkout-dataset_classification_demo")
## unzip dataset
if not os.path.exists(processed_classification_dir):
os.makedirs(processed_classification_dir)
print("Unzipping dataset...")
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(directory_to_extract_to)
directory_to_extract_to = os.path.join(directory_to_extract_to, "retail_product_checkout")
for dataset in ["train", "val", "test"]:
dataset_dir = os.path.join(directory_to_extract_to, dataset+"2019")
annotation_file = os.path.join(directory_to_extract_to, "instances_"+dataset+"2019.json")
output_dir = os.path.join(processed_classification_dir, dataset)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
## load coco dataset
print(f"Loading COCO {dataset} dataset...")
coco_label = COCO(annotation_file)
# crop images to classification data
for img_object in tqdm(coco_label.dataset["images"]):
image_path = os.path.join(dataset_dir, img_object["file_name"])
# remove top view images
if "camera2" in image_path:
continue
image_id = img_object["id"]
annotation_ids = coco_label.getAnnIds(imgIds=image_id)
for annot in coco_label.loadAnns(annotation_ids):
bbox = annot["bbox"]
class_id = annot["category_id"]
category = coco_label.loadCats(class_id)[0]
class_name = category["supercategory"] + "_" + category["name"]
crop_images(image_path, bbox, class_name, output_dir)
# extract a reference set from training set
## fixed random seed for reproducibility
np.random.seed(0)
create_reference_set(
os.path.join(processed_classification_dir, "train"), \
os.path.join(processed_classification_dir, "reference"), \
ref_num=100)
# split out unknown classes
# select 20% classes as unknown classes
class_list = os.listdir(os.path.join(processed_classification_dir, "train"))
total_class_num = len(class_list)
unknown_classes = np.random.choice(class_list, int(total_class_num*0.2), replace=False)
known_classes = [c for c in class_list if c not in unknown_classes]
known_classes_dir = os.path.join(processed_classification_dir, "known_classes")
unknown_classes_dir = os.path.join(processed_classification_dir, "unknown_classes")
for dataset in ["train", "val", "test", "reference"]:
known_classes_dataset_dir = os.path.join(known_classes_dir, dataset)
unknown_classes_dataset_dir = os.path.join(unknown_classes_dir, dataset)
if not os.path.exists(known_classes_dataset_dir):
os.makedirs(known_classes_dataset_dir)
if not os.path.exists(unknown_classes_dataset_dir):
os.makedirs(unknown_classes_dataset_dir)
for class_name in tqdm(known_classes):
class_dir = os.path.join(processed_classification_dir, dataset, class_name)
os.rename(class_dir, os.path.join(known_classes_dataset_dir, class_name))
for class_name in tqdm(unknown_classes):
class_dir = os.path.join(processed_classification_dir, dataset, class_name)
os.rename(class_dir, os.path.join(unknown_classes_dataset_dir, class_name))
# remove old folders
for dataset in ["train", "val", "test", "reference"]:
shutil.rmtree(os.path.join(processed_classification_dir, dataset))
| tao_front_end_services-main | notebooks/api/dataset_prepare/metric_learning_recognition/process_retail_product_checkout_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import cv2
import csv
import ujson
classes = set()
def read_kitti(prefix, label_file):
"Function wrapper to read kitti format labels txt file."
global classes
full_label_path = os.path.join(prefix, label_file)
if not full_label_path.endswith(".txt"):
return
if not os.path.exists(full_label_path):
raise ValueError("Labelfile : {} does not exist".format(full_label_path))
if os.path.isdir(full_label_path):
return
dict_list = []
image_name = full_label_path.replace("/labels", "/images").replace(".txt", ".jpg")
if not os.path.exists(image_name):
raise ValueError("Image : {} does not exist".format(image_name))
img = cv2.imread(image_name, 0)
height, width = img.shape[:2]
with open(full_label_path, 'r') as lf:
for row in csv.reader(lf, delimiter=' '):
classes.add(row[0])
dict_list.append({"class_name": row[0],
"file_name": label_file.replace(".txt", ".jpg"),
"height": height,
"width": width,
"bbox": [float(row[4]), float(row[5]), float(row[6]) - float(row[4]), float(row[7]) - float(row[5])]})
if (dict_list == []):
dict_list = [{"file_name": label_file.replace(".txt", ".jpg"),
"height": height,
"width": width}]
return dict_list
def construct_coco_json(labels_folder):
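"""Convert a folder of KITTI-format label files (with matching images under a sibling /images folder) into a COCO-style dict with "images", "annotations", and "categories"."""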
image_id = 0
annot_ctr = 0
labels = []
for file in os.listdir(labels_folder):
label = read_kitti(labels_folder, file)
labels.append(label)
categories = []
class_to_id_mapping = {}
for idx, object_class in enumerate(classes):
class_to_id_mapping[object_class] = idx + 1
categories.append({"supercategory": object_class, "id": idx + 1, "name": object_class})
coco_json = {"images": [], "annotations": [], "categories": categories}
for label in labels:
if not (label and len(label)):
continue
coco_json["images"].append({"file_name": label[0]["file_name"], "height": label[0]["height"], "width": label[0]["width"], "id": image_id})
for instance in label:
if ("bbox" in instance.keys()):
coco_json["annotations"].append({"bbox": instance["bbox"],
"image_id": image_id,
"id": annot_ctr,
"category_id": class_to_id_mapping[instance["class_name"]],
"bbox_mode": 1,
"segmentation": [],
"iscrowd": 0,
"area": float(instance["bbox"][2] * instance["bbox"][3])})
annot_ctr += 1
image_id += 1
return coco_json
label_folder = sys.argv[1]
coco_json = construct_coco_json(label_folder)
current_str = ujson.dumps(coco_json, indent=4)
with open(sys.argv[2] + "/annotations.json", "w") as json_out_file:
json_out_file.write(current_str)
label_map_extension = sys.argv[3]
with open(f"{sys.argv[2]}/label_map.{label_map_extension}", "w") as label_map_file:
for idx, class_name in enumerate(classes):
if label_map_extension == "yaml":
label_map_file.write(f"{idx+1}: '{class_name}'\n")
else:
label_map_file.write(f"{class_name}\n")
label_map_file.flush()
print(len(classes))
| tao_front_end_services-main | notebooks/api/dataset_prepare/kitti/kitti_to_coco.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calibration of KITTI dataset."""
import numpy as np
def get_calib_from_file(calib_file):
"""Get calibration from file."""
with open(calib_file) as f:
lines = f.readlines()
obj = lines[2].strip().split(' ')[1:]
P2 = np.array(obj, dtype=np.float32)
obj = lines[3].strip().split(' ')[1:]
P3 = np.array(obj, dtype=np.float32)
obj = lines[4].strip().split(' ')[1:]
R0 = np.array(obj, dtype=np.float32)
obj = lines[5].strip().split(' ')[1:]
Tr_velo_to_cam = np.array(obj, dtype=np.float32)
return {'P2': P2.reshape(3, 4),
'P3': P3.reshape(3, 4),
'R0': R0.reshape(3, 3),
'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
class Calibration(object):
"""Calibration class."""
def __init__(self, calib_file):
"""Initialize."""
if not isinstance(calib_file, dict):
calib = get_calib_from_file(calib_file)
else:
calib = calib_file
self.P2 = calib['P2'] # 3 x 4
self.R0 = calib['R0'] # 3 x 3
self.V2C = calib['Tr_velo2cam'] # 3 x 4
# Camera intrinsics and extrinsics
self.cu = self.P2[0, 2]
self.cv = self.P2[1, 2]
self.fu = self.P2[0, 0]
self.fv = self.P2[1, 1]
self.tx = self.P2[0, 3] / (-self.fu)
self.ty = self.P2[1, 3] / (-self.fv)
def cart_to_hom(self, pts):
"""
:param pts: (N, 3 or 2)
:return pts_hom: (N, 4 or 3)
"""
pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
return pts_hom
def rect_to_lidar(self, pts_rect):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_rect_hom = self.cart_to_hom(pts_rect) # (N, 4)
R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)
R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
R0_ext[3, 3] = 1
V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
V2C_ext[3, 3] = 1
pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
return pts_lidar[:, 0:3]
def lidar_to_rect(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_lidar_hom = self.cart_to_hom(pts_lidar)
pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
# pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
return pts_rect
def rect_to_img(self, pts_rect):
"""
:param pts_rect: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect_hom = self.cart_to_hom(pts_rect)
pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)
pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord
return pts_img, pts_rect_depth
def lidar_to_img(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect = self.lidar_to_rect(pts_lidar)
pts_img, pts_depth = self.rect_to_img(pts_rect)
return pts_img, pts_depth
def img_to_rect(self, u, v, depth_rect):
"""
:param u: (N)
:param v: (N)
:param depth_rect: (N)
:return:
"""
x = ((u - self.cu) * depth_rect) / self.fu + self.tx
y = ((v - self.cv) * depth_rect) / self.fv + self.ty
pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
return pts_rect
def corners3d_to_img_boxes(self, corners3d):
"""
:param corners3d: (N, 8, 3) corners in rect coordinate
:return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
:return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
"""
sample_num = corners3d.shape[0]
corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 4)
img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)
x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
return boxes, boxes_corner
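# Illustrative usage sketch (not part of the original module); the calibration file path is hypothetical.
# calib = Calibration("training/calib/000000.txt")
# pts_lidar = np.random.rand(100, 3).astype(np.float32)  # (N, 3) points in LiDAR coordinates
# pts_img, pts_depth = calib.lidar_to_img(pts_lidar)  # project to the image plane with per-point depth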
| tao_front_end_services-main | notebooks/api/dataset_prepare/pointpillars/calibration_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import sys
def drop_class(label_dir, classes):
"""drop label by class names."""
labels = os.listdir(label_dir)
labels = [os.path.join(label_dir, x) for x in labels]
for gt in labels:
print("Processing ", gt)
with open(gt) as f:
lines = f.readlines()
lines_ret = []
for line in lines:
ls = line.strip()
line = ls.split()
if line[0] in classes:
print("Dropping ", line[0])
continue
else:
lines_ret.append(ls)
with open(gt, "w") as fo:
out = '\n'.join(lines_ret)
fo.write(out)
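# Example invocation (paths and class names are illustrative):
# python drop_class.py /path/to/label_dir Car,Van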
if __name__ == "__main__":
drop_class(sys.argv[1], sys.argv[2].split(','))
| tao_front_end_services-main | notebooks/api/dataset_prepare/pointpillars/drop_class.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import argparse
import numpy as np
from object3d_kitti import get_objects_from_label
from calibration_kitti import Calibration
def parse_args():
parser = argparse.ArgumentParser("Convert camera label to LiDAR label.")
parser.add_argument(
"-l", "--label_dir",
type=str, required=True,
help="Camera label directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR label directory"
)
return parser.parse_args()
def generate_lidar_labels(label_dir, calib_dir, output_dir):
"""Generate LiDAR labels from KITTI Camera labels."""
for lab in os.listdir(label_dir):
lab_file = os.path.join(label_dir, lab)
obj_list = get_objects_from_label(lab_file)
calib_file = os.path.join(calib_dir, lab)
calib = Calibration(calib_file)
loc = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
loc_lidar = calib.rect_to_lidar(loc)
# update obj3d.loc
with open(os.path.join(output_dir, lab), "w") as lf:
for idx, lc in enumerate(loc_lidar):
# bottom center to 3D center
obj_list[idx].loc = (lc + np.array([0., 0., obj_list[idx].h / 2.]))
# rotation_y to rotation_z
obj_list[idx].ry = -np.pi / 2. - obj_list[idx].ry
lf.write(obj_list[idx].to_kitti_format())
lf.write('\n')
if __name__ == "__main__":
args = parse_args()
generate_lidar_labels(args.label_dir, args.calib_dir, args.output_dir)
| tao_front_end_services-main | notebooks/api/dataset_prepare/pointpillars/gen_lidar_labels.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import sys
def split(list_file, lidar, label, output_lidar, output_label):
"""train/val split of the KITTI dataset."""
with open(list_file) as lf:
file_names = lf.readlines()
file_names = [f.strip() for f in file_names]
for li in os.listdir(lidar):
if li[:-4] in file_names:
os.rename(os.path.join(lidar, li), os.path.join(output_lidar, li))
for la in os.listdir(label):
if la[:-4] in file_names:
os.rename(os.path.join(label, la), os.path.join(output_label, la))
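# Example invocation (paths are illustrative); arguments are: list_file, lidar dir, label dir, output lidar dir, output label dir.
# python kitti_split.py val.txt lidar/ label/ lidar_val/ label_val/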
if __name__ == "__main__":
split(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
| tao_front_end_services-main | notebooks/api/dataset_prepare/pointpillars/kitti_split.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""3D object KITTI utils."""
import numpy as np
def get_objects_from_label(label_file):
"""Get objects from label."""
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [Object3d(line) for line in lines]
return objects
def cls_type_to_id(cls_type):
"""Convert class type to ID."""
type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
if cls_type not in type_to_id.keys():
return -1
return type_to_id[cls_type]
class Object3d(object):
"""Object3d class."""
def __init__(self, line):
"""Initialize."""
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.cls_id = cls_type_to_id(self.cls_type)
self.truncation = float(label[1])
self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[8])
self.w = float(label[9])
self.l = float(label[10]) # noqa: E741
self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
self.dis_to_cam = np.linalg.norm(self.loc)
self.ry = float(label[14])
self.score = float(label[15]) if label.__len__() == 16 else -1.0
self.level_str = None
self.level = self.get_kitti_obj_level()
def get_kitti_obj_level(self):
"""Get KITTI object difficult level."""
height = float(self.box2d[3]) - float(self.box2d[1]) + 1
if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
self.level_str = 'Easy'
return 0 # Easy
if height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
self.level_str = 'Moderate'
return 1 # Moderate
if height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
self.level_str = 'Hard'
return 2 # Hard
self.level_str = 'UnKnown'
return -1
def generate_corners3d(self):
"""
generate corners3d representation for this object
:return corners_3d: (8, 3) corners of box3d in camera coord
"""
l, h, w = self.l, self.h, self.w
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
[0, 1, 0],
[-np.sin(self.ry), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.loc
return corners3d
def to_str(self):
"""Convert to string."""
print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
% (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l, self.loc, self.ry)
return print_str
def to_kitti_format(self):
"""Convert to KITTI format."""
kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
% (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2], self.ry)
return kitti_str
| tao_front_end_services-main | notebooks/api/dataset_prepare/pointpillars/object3d_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import argparse
import numpy as np
from skimage import io
from calibration_kitti import Calibration
def parse_args():
parser = argparse.ArgumentParser("Limit LIDAR points to FOV range.")
parser.add_argument(
"-p", "--points_dir",
type=str, required=True,
help="LIDAR points directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR points directory"
)
parser.add_argument(
"-i",
"--image_dir",
type=str, required=True,
help="image directory"
)
return parser.parse_args()
def get_fov_flag(pts_rect, img_shape, calib):
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def generate_lidar_points(points_dir, calib_dir, output_dir, image_dir):
"""Limit LiDAR points to FOV range."""
for pts in os.listdir(points_dir):
pts_file = os.path.join(points_dir, pts)
points = np.fromfile(pts_file, dtype=np.float32).reshape(-1, 4)
calib_file = os.path.join(calib_dir, pts[:-4] + ".txt")
calib = Calibration(calib_file)
pts_rect = calib.lidar_to_rect(points[:, 0:3])
img_file = os.path.join(image_dir, pts[:-4] + ".png")
img_shape = np.array(io.imread(img_file).shape[:2], dtype=np.int32)
fov_flag = get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
points.tofile(os.path.join(output_dir, pts))
# double check
points_cp = np.fromfile(os.path.join(output_dir, pts), dtype=np.float32).reshape(-1, 4)
assert np.equal(points, points_cp).all()
if __name__ == "__main__":
args = parse_args()
generate_lidar_points(
args.points_dir, args.calib_dir,
args.output_dir, args.image_dir
)
| tao_front_end_services-main | notebooks/api/dataset_prepare/pointpillars/gen_lidar_points.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prepare train/val dataset for Unet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
from PIL import Image, ImageSequence
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for UNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to ISBI Tiff Files"
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to processes images from ISBI Tiff files."
)
return parser.parse_args(args)
def save_arrays_as_images(arr, im_dir):
"""Utility function to save the images to dir from arrays."""
for idx, img_arr in enumerate(arr):
img_name = os.path.join(im_dir, "image_{}.png".format(idx))
cv2.imwrite(img_name, img_arr)
def load_multipage_tiff(path):
"""Load tiff images containing many images in the channel dimension"""
return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])
def check_and_create(d):
"""Utility function to create a dir if not present"""
if not os.path.isdir(d):
os.makedirs(d)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
train_images_tif = os.path.join(args.input_dir, "train-volume.tif")
train_masks_tif = os.path.join(args.input_dir, "train-labels.tif")
test_images_tif = os.path.join(args.input_dir, "test-volume.tif")
output_images_dir = os.path.join(args.output_dir, "images")
output_masks_dir = os.path.join(args.output_dir, "masks")
# Creating the images dir for train, test, val
train_images_dir = os.path.join(output_images_dir, "train")
val_images_dir = os.path.join(output_images_dir, "val")
test_images_dir = os.path.join(output_images_dir, "test")
train_masks_dir = os.path.join(output_masks_dir, "train")
val_masks_dir = os.path.join(output_masks_dir, "val")
check_and_create(train_images_dir)
check_and_create(val_images_dir)
check_and_create(test_images_dir)
check_and_create(train_masks_dir)
check_and_create(val_masks_dir)
train_np_arrays_images = load_multipage_tiff(train_images_tif)
train_np_arrays_masks = load_multipage_tiff(train_masks_tif)
test_np_arrays_images = load_multipage_tiff(test_images_tif)
# Splitting the train numpy arrays into train and val
train_np_arrays_images_final = train_np_arrays_images[:20, :, :]
train_np_arrays_masks_final = train_np_arrays_masks[:20, :, :]
val_np_arrays_images_final = train_np_arrays_images[20:, :, :]
val_np_arrays_masks_final = train_np_arrays_masks[20:, :, :]
# Saving the train arrays as images
save_arrays_as_images(train_np_arrays_images_final, train_images_dir)
save_arrays_as_images(train_np_arrays_masks_final, train_masks_dir)
# Saving the val arrays as images
save_arrays_as_images(val_np_arrays_images_final, val_images_dir)
save_arrays_as_images(val_np_arrays_masks_final, val_masks_dir)
# Saving the test arrays as images
save_arrays_as_images(test_np_arrays_images, test_images_dir)
print("Prepared data successfully !")
if __name__ == "__main__":
main()
| tao_front_end_services-main | notebooks/api/dataset_prepare/unet/prepare_data_isbi.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import re
import random
import sys
def sample_dataset(input_dir, output_dir, n_samples, use_ids=None):
"""Select a subset of images fom input_dir and move them to output_dir.
Args:
input_dir (str): Input Folder Path of the train images.
output_dir (str): Output Folder Path of the test images.
n_samples (int): Number of samples to use.
use_ids(list int): List of IDs to grab from test and query folder.
Returns:
IDs used for sampling
"""
img_paths = glob.glob(os.path.join(input_dir, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
id_to_img = {}
# Grab images with matching ids
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid not in id_to_img:
id_to_img[pid] = []
id_to_img[pid].append(img_path)
# Create directory
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
command = "rm -r " + output_dir
os.system(command)
os.makedirs(output_dir)
assert id_to_img, "Dataset size cannot be 0."
sampled_id_to_img = dict(random.sample(list(id_to_img.items()), n_samples))
for key, img_paths in sampled_id_to_img.items():
for img_path in img_paths:
command = "cp " + img_path + " " + output_dir
os.system(command)
# Use same ids for test and query
if use_ids:
# Create query dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
command = "rm -r " + output_dir
os.system(command)
os.makedirs(output_dir)
# Find images in test with same id
img_paths = glob.glob(os.path.join(input_dir, '*.jpg'))
for id in use_ids:
pattern = re.compile(r'([-\d]+)_c(\d)')
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if id == pid:
print(img_path)
command = "cp " + img_path + " " + output_dir
os.system(command)
return sampled_id_to_img.keys()
# Number of samples
n_samples = int(sys.argv[1])
data_dir = os.path.join(os.environ["DATA_DIR"], "market1501")
# Create train dataset
train_input_dir = os.path.join(data_dir, "bounding_box_train")
train_output_dir = os.path.join(data_dir, "sample_train")
sample_dataset(train_input_dir, train_output_dir, n_samples)
# Create test dataset
test_input_dir = os.path.join(data_dir, "bounding_box_test")
test_output_dir = os.path.join(data_dir, "sample_test")
ids = sample_dataset(test_input_dir, test_output_dir, n_samples)
# Create query dataset
query_input_dir = os.path.join(data_dir, "query")
query_output_dir = os.path.join(data_dir, "sample_query")
sample_dataset(query_input_dir, query_output_dir, n_samples, ids)
| tao_front_end_services-main | notebooks/api/dataset_prepare/re_identification/obtain_subset_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import numpy as np
data_dir = os.path.join(os.environ["DATA_DIR"], "kinetics")
# front_raises: 134
# pull_ups: 255
# clean_and_jerk: 59
# presenting_weather_forecast: 254
# deadlifting: 88
selected_actions = {
134: 0,
255: 1,
59: 2,
254: 3,
88: 4
}
def select_actions(selected_actions, data_dir, split_name):
"""Select a subset of actions and their corresponding labels.
Args:
selected_actions (dict): Map from selected class IDs to new class IDs.
data_dir (str): Path to the directory of data arrays (.npy) and labels (.pkl).
split_name (str): Name of the split to be processed, e.g., "train" and "val".
Returns:
No explicit returns
"""
data_path = os.path.join(data_dir, f"{split_name}_data.npy")
label_path = os.path.join(data_dir, f"{split_name}_label.pkl")
data_array = np.load(file=data_path)
with open(label_path, "rb") as label_file:
labels = pickle.load(label_file)
assert (len(labels) == 2)
assert (data_array.shape[0] == len(labels[0]))
assert (len(labels[0]) == len(labels[1]))
print(f"No. total samples for {split_name}: {data_array.shape[0]}")
selected_indices = []
for i in range(data_array.shape[0]):
if labels[1][i] in selected_actions.keys():
selected_indices.append(i)
data_array = data_array[selected_indices, :, :, :, :]
selected_sample_names = [labels[0][x] for x in selected_indices]
selected_labels = [selected_actions[labels[1][x]] for x in selected_indices]
labels = (selected_sample_names, selected_labels)
print(f"No. selected samples for {split_name}: {data_array.shape[0]}")
np.save(file=data_path, arr=data_array, allow_pickle=False)
with open(label_path, "wb") as label_file:
pickle.dump(labels, label_file, protocol=4)
select_actions(selected_actions, data_dir, "train")
select_actions(selected_actions, data_dir, "val")
| tao_front_end_services-main | notebooks/api/dataset_prepare/pose_classification/select_subset_actions.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Clean the labels to be alphanumeric and case-insensitive (lower case). Filter out labels longer than 25 characters.
import os
import re
import sys
from tqdm import tqdm
def preprocess_label(gt_file, filtered_file):
gt_list = open(gt_file, "r").readlines()
filtered_list = []
character_list = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
for label_line in tqdm(gt_list):
try:
path, label = label_line.strip().split()
except Exception:
continue
path = path[:-1]
label = label.strip("\"")
if re.search(f"[^{character_list}]", label):
continue
else:
if len(label) <= 25:
label = label.lower() # ignore the case
filtered_list.append(f"{path}\t{label}\n")
with open(filtered_file, "w") as f:
f.writelines(filtered_list)
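# Illustrative example of the expected ground-truth format (an assumption based on the parsing
# above, where the trailing character of the path field is dropped as a comma and quotes are
# stripped from the label):
#   word_001.png, "Hello"
#   word_002.png, "WORLD!"                          <- skipped: "!" is not alphanumeric
#   word_003.png, "abcdefghijklmnopqrstuvwxyz12"    <- skipped: longer than 25 characters
# The surviving lines are written as "<path>\t<lowercased label>\n".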
def main():
preprocess_label(sys.argv[1], sys.argv[2])
character_list = "0123456789abcdefghijklmnopqrstuvwxyz"
with open(os.path.join(os.getenv("DATA_DIR"), "character_list"), "w") as f:
for ch in character_list:
f.write(f"{ch}\n")
if __name__ == "__main__":
main() | tao_front_end_services-main | notebooks/api/dataset_prepare/ocrnet/preprocess_label.py |
__author__ = 'Sean Griffin'
__version__ = '1.0.0'
__email__ = '[email protected]'
import sys
import os.path
import json
import shutil
from pymel.core import *
from maya.OpenMaya import *
from maya.OpenMayaMPx import *
kPluginTranslatorTypeName = 'Three.js'
kOptionScript = 'ThreeJsExportScript'
kDefaultOptionsString = '0'
FLOAT_PRECISION = 8
class ThreeJsWriter(object):
def __init__(self):
self.componentKeys = ['vertices', 'normals', 'colors', 'uvs', 'faces',
'materials', 'diffuseMaps', 'specularMaps', 'bumpMaps', 'copyTextures',
'bones', 'skeletalAnim', 'bakeAnimations', 'prettyOutput']
def write(self, path, optionString, accessMode):
self.path = path
self._parseOptions(optionString)
self.verticeOffset = 0
self.uvOffset = 0
self.vertices = []
self.materials = []
self.faces = []
self.normals = []
self.uvs = []
self.morphTargets = []
self.bones = []
self.animations = []
self.skinIndices = []
self.skinWeights = []
if self.options["bakeAnimations"]:
print("exporting animations")
self._exportAnimations()
self._goToFrame(self.options["startFrame"])
if self.options["materials"]:
print("exporting materials")
self._exportMaterials()
if self.options["bones"]:
print("exporting bones")
select(map(lambda m: m.getParent(), ls(type='mesh')))
runtime.GoToBindPose()
self._exportBones()
print("exporting skins")
self._exportSkins()
print("exporting meshes")
self._exportMeshes()
if self.options["skeletalAnim"]:
print("exporting keyframe animations")
self._exportKeyframeAnimations()
print("writing file")
output = {
'metadata': {
'formatVersion': 3.1,
'generatedBy': 'Maya Exporter'
},
'vertices': self.vertices,
'uvs': [self.uvs],
'faces': self.faces,
'normals': self.normals,
'materials': self.materials,
}
if self.options['bakeAnimations']:
output['morphTargets'] = self.morphTargets
if self.options['bones']:
output['bones'] = self.bones
output['skinIndices'] = self.skinIndices
output['skinWeights'] = self.skinWeights
output['influencesPerVertex'] = self.options["influencesPerVertex"]
if self.options['skeletalAnim']:
output['animations'] = self.animations
with file(path, 'w') as f:
if self.options['prettyOutput']:
f.write(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))
else:
f.write(json.dumps(output, separators=(",",":")))
    def _allMeshes(self):
        # "__allMeshes" is name-mangled to "_ThreeJsWriter__allMeshes" inside the class,
        # so hasattr must check the mangled name or the cached list is never reused
        if not hasattr(self, '_ThreeJsWriter__allMeshes'):
self.__allMeshes = filter(lambda m: len(m.listConnections()) > 0, ls(type='mesh'))
return self.__allMeshes
def _parseOptions(self, optionsString):
self.options = dict([(x, False) for x in self.componentKeys])
for key in self.componentKeys:
self.options[key] = key in optionsString
if self.options["bones"]:
boneOptionsString = optionsString[optionsString.find("bones"):]
boneOptions = boneOptionsString.split(' ')
self.options["influencesPerVertex"] = int(boneOptions[1])
if self.options["bakeAnimations"]:
bakeAnimOptionsString = optionsString[optionsString.find("bakeAnimations"):]
bakeAnimOptions = bakeAnimOptionsString.split(' ')
self.options["startFrame"] = int(bakeAnimOptions[1])
self.options["endFrame"] = int(bakeAnimOptions[2])
self.options["stepFrame"] = int(bakeAnimOptions[3])
def _exportMeshes(self):
if self.options['vertices']:
self._exportVertices()
for mesh in self._allMeshes():
self._exportMesh(mesh)
def _exportMesh(self, mesh):
print("Exporting " + mesh.name())
if self.options['faces']:
print("Exporting faces")
self._exportFaces(mesh)
self.verticeOffset += len(mesh.getPoints())
self.uvOffset += mesh.numUVs()
if self.options['normals']:
print("Exporting normals")
self._exportNormals(mesh)
if self.options['uvs']:
print("Exporting UVs")
self._exportUVs(mesh)
def _getMaterialIndex(self, face, mesh):
if not hasattr(self, '_materialIndices'):
self._materialIndices = dict([(mat['DbgName'], i) for i, mat in enumerate(self.materials)])
if self.options['materials']:
for engine in mesh.listConnections(type='shadingEngine'):
if sets(engine, isMember=face):
for material in engine.listConnections(type='lambert'):
if self._materialIndices.has_key(material.name()):
return self._materialIndices[material.name()]
return -1
def _exportVertices(self):
self.vertices += self._getVertices()
def _exportAnimations(self):
for frame in self._framesToExport():
self._exportAnimationForFrame(frame)
def _framesToExport(self):
return range(self.options["startFrame"], self.options["endFrame"], self.options["stepFrame"])
def _exportAnimationForFrame(self, frame):
print("exporting frame " + str(frame))
self._goToFrame(frame)
self.morphTargets.append({
'name': "frame_" + str(frame),
'vertices': self._getVertices()
})
def _getVertices(self):
return [coord for mesh in self._allMeshes() for point in mesh.getPoints(space='world') for coord in [round(point.x, FLOAT_PRECISION), round(point.y, FLOAT_PRECISION), round(point.z, FLOAT_PRECISION)]]
def _goToFrame(self, frame):
currentTime(frame)
def _exportFaces(self, mesh):
typeBitmask = self._getTypeBitmask()
for face in mesh.faces:
materialIndex = self._getMaterialIndex(face, mesh)
hasMaterial = materialIndex != -1
self._exportFaceBitmask(face, typeBitmask, hasMaterial=hasMaterial)
self.faces += map(lambda x: x + self.verticeOffset, face.getVertices())
if self.options['materials']:
if hasMaterial:
self.faces.append(materialIndex)
if self.options['uvs'] and face.hasUVs():
self.faces += map(lambda v: face.getUVIndex(v) + self.uvOffset, range(face.polygonVertexCount()))
if self.options['normals']:
self._exportFaceVertexNormals(face)
def _exportFaceBitmask(self, face, typeBitmask, hasMaterial=True):
if face.polygonVertexCount() == 4:
faceBitmask = 1
else:
faceBitmask = 0
if hasMaterial:
faceBitmask |= (1 << 1)
if self.options['uvs'] and face.hasUVs():
faceBitmask |= (1 << 3)
self.faces.append(typeBitmask | faceBitmask)
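    # Worked example (illustrative): with normals, materials and UVs enabled,
    # a quad face that has a material and UVs is encoded as
    #   typeBitmask(32) | quad(1) | material(2) | vertex UVs(8) = 43,
    # which a format-3 importer decodes back into the same per-face flags.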
def _exportFaceVertexNormals(self, face):
for i in range(face.polygonVertexCount()):
self.faces.append(face.normalIndex(i))
def _exportNormals(self, mesh):
for normal in mesh.getNormals():
self.normals += [round(normal.x, FLOAT_PRECISION), round(normal.y, FLOAT_PRECISION), round(normal.z, FLOAT_PRECISION)]
def _exportUVs(self, mesh):
us, vs = mesh.getUVs()
for i, u in enumerate(us):
self.uvs.append(u)
self.uvs.append(vs[i])
def _getTypeBitmask(self):
bitmask = 0
if self.options['normals']:
bitmask |= 32
return bitmask
def _exportMaterials(self):
for mat in ls(type='lambert'):
self.materials.append(self._exportMaterial(mat))
def _exportMaterial(self, mat):
result = {
"DbgName": mat.name(),
"blending": "NormalBlending",
"colorDiffuse": map(lambda i: i * mat.getDiffuseCoeff(), mat.getColor().rgb),
"colorAmbient": mat.getAmbientColor().rgb,
"depthTest": True,
"depthWrite": True,
"shading": mat.__class__.__name__,
"transparency": mat.getTransparency().a,
"transparent": mat.getTransparency().a != 1.0,
"vertexColors": False
}
if isinstance(mat, nodetypes.Phong):
result["colorSpecular"] = mat.getSpecularColor().rgb
result["specularCoef"] = mat.getCosPower()
if self.options["specularMaps"]:
self._exportSpecularMap(result, mat)
if self.options["bumpMaps"]:
self._exportBumpMap(result, mat)
if self.options["diffuseMaps"]:
self._exportDiffuseMap(result, mat)
return result
def _exportBumpMap(self, result, mat):
for bump in mat.listConnections(type='bump2d'):
for f in bump.listConnections(type='file'):
result["mapNormalFactor"] = 1
self._exportFile(result, f, "Normal")
def _exportDiffuseMap(self, result, mat):
for f in mat.attr('color').inputs():
result["colorDiffuse"] = f.attr('defaultColor').get()
self._exportFile(result, f, "Diffuse")
def _exportSpecularMap(self, result, mat):
for f in mat.attr('specularColor').inputs():
result["colorSpecular"] = f.attr('defaultColor').get()
self._exportFile(result, f, "Specular")
def _exportFile(self, result, mapFile, mapType):
fName = os.path.basename(mapFile.ftn.get())
if self.options['copyTextures']:
shutil.copy2(mapFile.ftn.get(), os.path.dirname(self.path) + "/" + fName)
result["map" + mapType] = fName
result["map" + mapType + "Repeat"] = [1, 1]
result["map" + mapType + "Wrap"] = ["repeat", "repeat"]
result["map" + mapType + "Anistropy"] = 4
def _exportBones(self):
for joint in ls(type='joint'):
if joint.getParent():
parentIndex = self._indexOfJoint(joint.getParent().name())
else:
parentIndex = -1
rotq = joint.getRotation(quaternion=True) * joint.getOrientation()
pos = joint.getTranslation()
self.bones.append({
"parent": parentIndex,
"name": joint.name(),
"pos": self._roundPos(pos),
"rotq": self._roundQuat(rotq)
})
def _indexOfJoint(self, name):
if not hasattr(self, '_jointNames'):
self._jointNames = dict([(joint.name(), i) for i, joint in enumerate(ls(type='joint'))])
if name in self._jointNames:
return self._jointNames[name]
else:
return -1
def _exportKeyframeAnimations(self):
hierarchy = []
i = -1
frameRate = FramesPerSecond(currentUnit(query=True, time=True)).value()
for joint in ls(type='joint'):
hierarchy.append({
"parent": i,
"keys": self._getKeyframes(joint, frameRate)
})
i += 1
self.animations.append({
"name": "skeletalAction.001",
"length": (playbackOptions(maxTime=True, query=True) - playbackOptions(minTime=True, query=True)) / frameRate,
"fps": 1,
"hierarchy": hierarchy
})
def _getKeyframes(self, joint, frameRate):
firstFrame = playbackOptions(minTime=True, query=True)
lastFrame = playbackOptions(maxTime=True, query=True)
frames = sorted(list(set(keyframe(joint, query=True) + [firstFrame, lastFrame])))
keys = []
print("joint " + joint.name() + " has " + str(len(frames)) + " keyframes")
for frame in frames:
self._goToFrame(frame)
keys.append(self._getCurrentKeyframe(joint, frame, frameRate))
return keys
def _getCurrentKeyframe(self, joint, frame, frameRate):
pos = joint.getTranslation()
rot = joint.getRotation(quaternion=True) * joint.getOrientation()
return {
'time': (frame - playbackOptions(minTime=True, query=True)) / frameRate,
'pos': self._roundPos(pos),
'rot': self._roundQuat(rot),
'scl': [1,1,1]
}
def _roundPos(self, pos):
return map(lambda x: round(x, FLOAT_PRECISION), [pos.x, pos.y, pos.z])
def _roundQuat(self, rot):
return map(lambda x: round(x, FLOAT_PRECISION), [rot.x, rot.y, rot.z, rot.w])
def _exportSkins(self):
for mesh in self._allMeshes():
print("exporting skins for mesh: " + mesh.name())
skins = filter(lambda skin: mesh in skin.getOutputGeometry(), ls(type='skinCluster'))
if len(skins) > 0:
print("mesh has " + str(len(skins)) + " skins")
skin = skins[0]
joints = skin.influenceObjects()
for weights in skin.getWeights(mesh.vtx):
numWeights = 0
for i in range(0, len(weights)):
if weights[i] > 0:
self.skinWeights.append(weights[i])
self.skinIndices.append(self._indexOfJoint(joints[i].name()))
numWeights += 1
if numWeights > self.options["influencesPerVertex"]:
raise Exception("More than " + str(self.options["influencesPerVertex"]) + " influences on a vertex in " + mesh.name() + ".")
for i in range(0, self.options["influencesPerVertex"] - numWeights):
self.skinWeights.append(0)
self.skinIndices.append(0)
else:
print("mesh has no skins, appending 0")
for i in range(0, len(mesh.getPoints()) * self.options["influencesPerVertex"]):
self.skinWeights.append(0)
self.skinIndices.append(0)
class NullAnimCurve(object):
def getValue(self, index):
return 0.0
class ThreeJsTranslator(MPxFileTranslator):
def __init__(self):
MPxFileTranslator.__init__(self)
def haveWriteMethod(self):
return True
def filter(self):
return '*.js'
def defaultExtension(self):
return 'js'
def writer(self, fileObject, optionString, accessMode):
path = fileObject.fullName()
writer = ThreeJsWriter()
writer.write(path, optionString, accessMode)
def translatorCreator():
return asMPxPtr(ThreeJsTranslator())
def initializePlugin(mobject):
mplugin = MFnPlugin(mobject)
try:
mplugin.registerFileTranslator(kPluginTranslatorTypeName, None, translatorCreator, kOptionScript, kDefaultOptionsString)
except:
sys.stderr.write('Failed to register translator: %s' % kPluginTranslatorTypeName)
raise
def uninitializePlugin(mobject):
mplugin = MFnPlugin(mobject)
try:
mplugin.deregisterFileTranslator(kPluginTranslatorTypeName)
except:
sys.stderr.write('Failed to deregister translator: %s' % kPluginTranslatorTypeName)
raise
class FramesPerSecond(object):
MAYA_VALUES = {
'game': 15,
'film': 24,
'pal': 25,
'ntsc': 30,
'show': 48,
'palf': 50,
'ntscf': 60
}
def __init__(self, fpsString):
self.fpsString = fpsString
def value(self):
if self.fpsString in FramesPerSecond.MAYA_VALUES:
return FramesPerSecond.MAYA_VALUES[self.fpsString]
else:
return int(filter(lambda c: c.isdigit(), self.fpsString))
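    # Illustrative note: value() returns the mapped Maya rate for symbolic units
    # (e.g. FramesPerSecond('ntsc').value() == 30); for other strings it keeps only the
    # digits, so '48fps' -> 48 (an assumption about how custom rate strings are formatted).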
| three.js-master | utils/exporters/maya/plug-ins/threeJsFileTranslator.py |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
Blender importer for Three.js (ASCII JSON format).
"""
import os
import time
import json
import bpy
import mathutils
from mathutils.geometry import tessellate_polygon
from bpy_extras.image_utils import load_image
# #####################################################
# Generators
# #####################################################
def setColor(c, t):
c.r = t[0]
c.g = t[1]
c.b = t[2]
def create_texture(filename, modelpath):
name = filename
texture = bpy.data.textures.new(name, type='IMAGE')
image = load_image(filename, modelpath)
has_data = False
if image:
texture.image = image
has_data = image.has_data
return texture
def create_materials(data, modelpath):
materials = []
materials_data = data.get("materials", [])
for i, m in enumerate(materials_data):
name = m.get("DbgName", "material_%d" % i)
colorAmbient = m.get("colorAmbient", None)
colorDiffuse = m.get("colorDiffuse", None)
colorSpecular = m.get("colorSpecular", None)
alpha = m.get("transparency", 1.0)
specular_hardness = m.get("specularCoef", 0)
mapDiffuse = m.get("mapDiffuse", None)
mapLightmap = m.get("mapLightmap", None)
vertexColorsType = m.get("vertexColors", False)
useVertexColors = False
if vertexColorsType:
useVertexColors = True
material = bpy.data.materials.new(name)
material.THREE_useVertexColors = useVertexColors
if colorDiffuse:
setColor(material.diffuse_color, colorDiffuse)
material.diffuse_intensity = 1.0
if colorSpecular:
setColor(material.specular_color, colorSpecular)
material.specular_intensity = 1.0
if alpha < 1.0:
material.alpha = alpha
material.use_transparency = True
if specular_hardness:
material.specular_hardness = specular_hardness
if mapDiffuse:
texture = create_texture(mapDiffuse, modelpath)
mtex = material.texture_slots.add()
mtex.texture = texture
mtex.texture_coords = 'UV'
mtex.use = True
mtex.use_map_color_diffuse = True
material.active_texture = texture
materials.append(material)
return materials
def create_mesh_object(name, vertices, materials, face_data, flipYZ, recalculate_normals):
faces = face_data["faces"]
vertexNormals = face_data["vertexNormals"]
vertexColors = face_data["vertexColors"]
vertexUVs = face_data["vertexUVs"]
faceMaterials = face_data["materials"]
faceColors = face_data["faceColors"]
edges = []
# Create a new mesh
me = bpy.data.meshes.new(name)
me.from_pydata(vertices, edges, faces)
# Handle normals
if not recalculate_normals:
me.update(calc_edges = True)
if face_data["hasVertexNormals"]:
print("setting vertex normals")
for fi in range(len(faces)):
if vertexNormals[fi]:
#print("setting face %i with %i vertices" % (fi, len(normals[fi])))
# if me.update() is called after setting vertex normals
# setting face.use_smooth overrides these normals
# - this fixes weird shading artefacts (seems to come from sharing
# of vertices between faces, didn't find a way how to set vertex normals
# per face use of vertex as opposed to per vertex),
# - probably this just overrides all custom vertex normals
# - to preserve vertex normals from the original data
# call me.update() before setting them
me.tessfaces[fi].use_smooth = True
if not recalculate_normals:
for j in range(len(vertexNormals[fi])):
vertexNormal = vertexNormals[fi][j]
x = vertexNormal[0]
y = vertexNormal[1]
z = vertexNormal[2]
if flipYZ:
tmp = y
y = -z
z = tmp
                        # flip normals (this makes them look consistent with the original before export)
#x = -x
#y = -y
#z = -z
vi = me.tessfaces[fi].vertices[j]
me.vertices[vi].normal.x = x
me.vertices[vi].normal.y = y
me.vertices[vi].normal.z = z
if recalculate_normals:
me.update(calc_edges = True)
# Handle colors
if face_data["hasVertexColors"]:
print("setting vertex colors")
me.vertex_colors.new("vertex_color_layer_0")
for fi in range(len(faces)):
if vertexColors[fi]:
face_colors = me.vertex_colors[0].data[fi]
face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
for vi in range(len(vertexColors[fi])):
r = vertexColors[fi][vi][0]
g = vertexColors[fi][vi][1]
b = vertexColors[fi][vi][2]
face_colors[vi].r = r
face_colors[vi].g = g
face_colors[vi].b = b
elif face_data["hasFaceColors"]:
print("setting vertex colors from face colors")
me.vertex_colors.new("vertex_color_layer_0")
for fi in range(len(faces)):
if faceColors[fi]:
r = faceColors[fi][0]
g = faceColors[fi][1]
b = faceColors[fi][2]
face_colors = me.vertex_colors[0].data[fi]
face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
for vi in range(len(faces[fi])):
face_colors[vi].r = r
face_colors[vi].g = g
face_colors[vi].b = b
# Handle uvs
if face_data["hasVertexUVs"]:
print("setting vertex uvs")
for li, layer in enumerate(vertexUVs):
me.uv_textures.new("uv_layer_%d" % li)
for fi in range(len(faces)):
if layer[fi]:
uv_face = me.uv_textures[li].data[fi]
face_uvs = uv_face.uv1, uv_face.uv2, uv_face.uv3, uv_face.uv4
for vi in range(len(layer[fi])):
u = layer[fi][vi][0]
v = layer[fi][vi][1]
face_uvs[vi].x = u
face_uvs[vi].y = v
active_texture = materials[faceMaterials[fi]].active_texture
if active_texture:
uv_face.image = active_texture.image
# Handle materials # 1
if face_data["hasMaterials"]:
print("setting materials (mesh)")
for m in materials:
me.materials.append(m)
print("setting materials (faces)")
for fi in range(len(faces)):
if faceMaterials[fi] >= 0:
me.tessfaces[fi].material_index = faceMaterials[fi]
# Create a new object
ob = bpy.data.objects.new(name, me)
ob.data = me # link the mesh data to the object
scene = bpy.context.scene # get the current scene
scene.objects.link(ob) # link the object into the scene
ob.location = scene.cursor_location # position object at 3d-cursor
# #####################################################
# Faces
# #####################################################
def extract_faces(data):
result = {
"faces" : [],
"materials" : [],
"faceUVs" : [],
"vertexUVs" : [],
"faceNormals" : [],
"vertexNormals" : [],
"faceColors" : [],
"vertexColors" : [],
"hasVertexNormals" : False,
"hasVertexUVs" : False,
"hasVertexColors" : False,
"hasFaceColors" : False,
"hasMaterials" : False
}
faces = data.get("faces", [])
normals = data.get("normals", [])
colors = data.get("colors", [])
offset = 0
zLength = len(faces)
# disregard empty arrays
nUvLayers = 0
for layer in data["uvs"]:
if len(layer) > 0:
nUvLayers += 1
result["faceUVs"].append([])
result["vertexUVs"].append([])
while ( offset < zLength ):
type = faces[ offset ]
offset += 1
isQuad = isBitSet( type, 0 )
hasMaterial = isBitSet( type, 1 )
hasFaceUv = isBitSet( type, 2 )
hasFaceVertexUv = isBitSet( type, 3 )
hasFaceNormal = isBitSet( type, 4 )
hasFaceVertexNormal = isBitSet( type, 5 )
hasFaceColor = isBitSet( type, 6 )
hasFaceVertexColor = isBitSet( type, 7 )
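        # Worked example (illustrative): type 43 = 0b00101011 decodes to
        # isQuad, hasMaterial, hasFaceVertexUv and hasFaceVertexNormal set,
        # so this face is followed by 4 vertex indices, a material index,
        # 4 UV indices per UV layer and 4 normal indices.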
#print("type", type, "bits", isQuad, hasMaterial, hasFaceUv, hasFaceVertexUv, hasFaceNormal, hasFaceVertexNormal, hasFaceColor, hasFaceVertexColor)
result["hasVertexUVs"] = result["hasVertexUVs"] or hasFaceVertexUv
result["hasVertexNormals"] = result["hasVertexNormals"] or hasFaceVertexNormal
result["hasVertexColors"] = result["hasVertexColors"] or hasFaceVertexColor
result["hasFaceColors"] = result["hasFaceColors"] or hasFaceColor
result["hasMaterials"] = result["hasMaterials"] or hasMaterial
# vertices
if isQuad:
a = faces[ offset ]
offset += 1
b = faces[ offset ]
offset += 1
c = faces[ offset ]
offset += 1
d = faces[ offset ]
offset += 1
face = [a, b, c, d]
nVertices = 4
else:
a = faces[ offset ]
offset += 1
b = faces[ offset ]
offset += 1
c = faces[ offset ]
offset += 1
face = [a, b, c]
nVertices = 3
result["faces"].append(face)
# material
if hasMaterial:
materialIndex = faces[ offset ]
offset += 1
else:
materialIndex = -1
result["materials"].append(materialIndex)
# uvs
for i in range(nUvLayers):
faceUv = None
if hasFaceUv:
uvLayer = data["uvs"][ i ]
uvIndex = faces[ offset ]
offset += 1
u = uvLayer[ uvIndex * 2 ]
v = uvLayer[ uvIndex * 2 + 1 ]
faceUv = [u, v]
result["faceUVs"][i].append(faceUv)
if hasFaceVertexUv:
uvLayer = data["uvs"][ i ]
vertexUvs = []
for j in range(nVertices):
uvIndex = faces[ offset ]
offset += 1
u = uvLayer[ uvIndex * 2 ]
v = uvLayer[ uvIndex * 2 + 1 ]
vertexUvs.append([u, v])
result["vertexUVs"][i].append(vertexUvs)
if hasFaceNormal:
normalIndex = faces[ offset ] * 3
offset += 1
x = normals[ normalIndex ]
y = normals[ normalIndex + 1 ]
z = normals[ normalIndex + 2 ]
faceNormal = [x, y, z]
else:
faceNormal = None
result["faceNormals"].append(faceNormal)
if hasFaceVertexNormal:
vertexNormals = []
for j in range(nVertices):
normalIndex = faces[ offset ] * 3
offset += 1
x = normals[ normalIndex ]
y = normals[ normalIndex + 1 ]
z = normals[ normalIndex + 2 ]
vertexNormals.append( [x, y, z] )
else:
vertexNormals = None
result["vertexNormals"].append(vertexNormals)
if hasFaceColor:
colorIndex = faces[ offset ]
offset += 1
faceColor = hexToTuple( colors[ colorIndex ] )
else:
faceColor = None
result["faceColors"].append(faceColor)
if hasFaceVertexColor:
vertexColors = []
for j in range(nVertices):
colorIndex = faces[ offset ]
offset += 1
color = hexToTuple( colors[ colorIndex ] )
vertexColors.append( color )
else:
vertexColors = None
result["vertexColors"].append(vertexColors)
return result
# #####################################################
# Utils
# #####################################################
def hexToTuple( hexColor ):
r = (( hexColor >> 16 ) & 0xff) / 255.0
g = (( hexColor >> 8 ) & 0xff) / 255.0
b = ( hexColor & 0xff) / 255.0
return (r, g, b)
def isBitSet(value, position):
return value & ( 1 << position )
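# Illustrative examples:
#   hexToTuple(0xff8040) -> (1.0, ~0.502, ~0.251)   (0xff/255, 0x80/255, 0x40/255)
#   isBitSet(43, 3)      -> truthy (bit 3 of 0b00101011 is set)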
def splitArray(data, chunkSize):
result = []
chunk = []
for i in range(len(data)):
if i > 0 and i % chunkSize == 0:
result.append(chunk)
chunk = []
chunk.append(data[i])
result.append(chunk)
return result
def extract_json_string(text):
marker_begin = "var model ="
marker_end = "postMessage"
start = text.find(marker_begin) + len(marker_begin)
end = text.find(marker_end)
end = text.rfind("}", start, end)
return text[start:end+1].strip()
def get_name(filepath):
return os.path.splitext(os.path.basename(filepath))[0]
def get_path(filepath):
return os.path.dirname(filepath)
# #####################################################
# Parser
# #####################################################
def load(operator, context, filepath, option_flip_yz = True, recalculate_normals = True, option_worker = False):
print('\nimporting %r' % filepath)
time_main = time.time()
print("\tparsing JSON file...")
time_sub = time.time()
file = open(filepath, 'rU')
rawcontent = file.read()
file.close()
if option_worker:
json_string = extract_json_string(rawcontent)
else:
json_string = rawcontent
data = json.loads( json_string )
time_new = time.time()
print('parsing %.4f sec' % (time_new - time_sub))
time_sub = time_new
# flip YZ
vertices = splitArray(data["vertices"], 3)
if option_flip_yz:
vertices[:] = [(v[0], -v[2], v[1]) for v in vertices]
# extract faces
face_data = extract_faces(data)
# deselect all
bpy.ops.object.select_all(action='DESELECT')
nfaces = len(face_data["faces"])
nvertices = len(vertices)
nnormals = len(data.get("normals", [])) / 3
ncolors = len(data.get("colors", [])) / 3
nuvs = len(data.get("uvs", [])) / 2
nmaterials = len(data.get("materials", []))
print('\tbuilding geometry...\n\tfaces:%i, vertices:%i, vertex normals: %i, vertex uvs: %i, vertex colors: %i, materials: %i ...' % (
nfaces, nvertices, nnormals, nuvs, ncolors, nmaterials ))
# Create materials
materials = create_materials(data, get_path(filepath))
# Create new obj
create_mesh_object(get_name(filepath), vertices, materials, face_data, option_flip_yz, recalculate_normals)
scene = bpy.context.scene
scene.update()
time_new = time.time()
print('finished importing: %r in %.4f sec.' % (filepath, (time_new - time_main)))
return {'FINISHED'}
if __name__ == "__main__":
register()
| three.js-master | utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/import_threejs.py |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
Blender exporter for Three.js (ASCII JSON format).
TODO
- binary format
"""
import bpy
import mathutils
import shutil
import os
import os.path
import math
import operator
import random
# #####################################################
# Configuration
# #####################################################
DEFAULTS = {
"bgcolor" : [0, 0, 0],
"bgalpha" : 1.0,
"position" : [0, 0, 0],
"rotation" : [0, 0, 0],
"scale" : [1, 1, 1],
"camera" :
{
"name" : "default_camera",
"type" : "PerspectiveCamera",
"near" : 1,
"far" : 10000,
"fov" : 60,
"aspect": 1.333,
"position" : [0, 0, 10],
"target" : [0, 0, 0]
},
"light" :
{
"name" : "default_light",
"type" : "DirectionalLight",
"direction" : [0, 1, 1],
"color" : [1, 1, 1],
"intensity" : 0.8
}
}
ROTATE_X_PI2 = mathutils.Quaternion((1.0, 0.0, 0.0), math.radians(-90.0)).to_matrix().to_4x4()
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# skinning
MAX_INFLUENCES = 2
# #####################################################
# Templates - scene
# #####################################################
TEMPLATE_SCENE_ASCII = """\
{
"metadata" :
{
"formatVersion" : 3.2,
"type" : "scene",
"sourceFile" : "%(fname)s",
"generatedBy" : "Blender 2.7 Exporter",
"objects" : %(nobjects)s,
"geometries" : %(ngeometries)s,
"materials" : %(nmaterials)s,
"textures" : %(ntextures)s
},
"urlBaseType" : %(basetype)s,
%(sections)s
"transform" :
{
"position" : %(position)s,
"rotation" : %(rotation)s,
"scale" : %(scale)s
},
"defaults" :
{
"bgcolor" : %(bgcolor)s,
"bgalpha" : %(bgalpha)f,
"camera" : %(defcamera)s
}
}
"""
TEMPLATE_SECTION = """
"%s" :
{
%s
},
"""
TEMPLATE_OBJECT = """\
%(object_id)s : {
"geometry" : %(geometry_id)s,
"groups" : [ %(group_id)s ],
"material" : %(material_id)s,
"position" : %(position)s,
"rotation" : %(rotation)s,
"quaternion": %(quaternion)s,
"scale" : %(scale)s,
"visible" : %(visible)s,
"castShadow" : %(castShadow)s,
"receiveShadow" : %(receiveShadow)s,
"doubleSided" : %(doubleSided)s
}"""
TEMPLATE_EMPTY = """\
%(object_id)s : {
"groups" : [ %(group_id)s ],
"position" : %(position)s,
"rotation" : %(rotation)s,
"quaternion": %(quaternion)s,
"scale" : %(scale)s
}"""
TEMPLATE_GEOMETRY_LINK = """\
%(geometry_id)s : {
"type" : "ascii",
"url" : %(model_file)s
}"""
TEMPLATE_GEOMETRY_EMBED = """\
%(geometry_id)s : {
"type" : "embedded",
"id" : %(embed_id)s
}"""
TEMPLATE_TEXTURE = """\
%(texture_id)s : {
"url": %(texture_file)s%(extras)s
}"""
TEMPLATE_MATERIAL_SCENE = """\
%(material_id)s : {
"type": %(type)s,
"parameters": { %(parameters)s }
}"""
TEMPLATE_CAMERA_PERSPECTIVE = """\
%(camera_id)s : {
"type" : "PerspectiveCamera",
"fov" : %(fov)f,
"aspect": %(aspect)f,
"near" : %(near)f,
"far" : %(far)f,
"position": %(position)s,
"target" : %(target)s
}"""
TEMPLATE_CAMERA_ORTHO = """\
%(camera_id)s : {
"type" : "OrthographicCamera",
"left" : %(left)f,
"right" : %(right)f,
"top" : %(top)f,
"bottom": %(bottom)f,
"near" : %(near)f,
"far" : %(far)f,
"position": %(position)s,
"target" : %(target)s
}"""
TEMPLATE_LIGHT_POINT = """\
%(light_id)s : {
"type" : "PointLight",
"position" : %(position)s,
"rotation" : %(rotation)s,
"color" : %(color)d,
"distance" : %(distance).3f,
"intensity" : %(intensity).3f
}"""
TEMPLATE_LIGHT_SUN = """\
%(light_id)s : {
"type" : "AmbientLight",
"position" : %(position)s,
"rotation" : %(rotation)s,
"color" : %(color)d,
"distance" : %(distance).3f,
"intensity" : %(intensity).3f
}"""
TEMPLATE_LIGHT_SPOT = """\
%(light_id)s : {
"type" : "SpotLight",
"position" : %(position)s,
"rotation" : %(rotation)s,
"color" : %(color)d,
"distance" : %(distance).3f,
"intensity" : %(intensity).3f,
"use_shadow" : %(use_shadow)d,
"angle" : %(angle).3f
}"""
TEMPLATE_LIGHT_HEMI = """\
%(light_id)s : {
"type" : "HemisphereLight",
"position" : %(position)s,
"rotation" : %(rotation)s,
"color" : %(color)d,
"distance" : %(distance).3f,
"intensity" : %(intensity).3f
}"""
TEMPLATE_LIGHT_AREA = """\
%(light_id)s : {
"type" : "AreaLight",
"position" : %(position)s,
"rotation" : %(rotation)s,
"color" : %(color)d,
"distance" : %(distance).3f,
"intensity" : %(intensity).3f,
"gamma" : %(gamma).3f,
"shape" : "%(shape)s",
"size" : %(size).3f,
"size_y" : %(size_y).3f
}"""
TEMPLATE_VEC4 = '[ %g, %g, %g, %g ]'
TEMPLATE_VEC3 = '[ %g, %g, %g ]'
TEMPLATE_VEC2 = '[ %g, %g ]'
TEMPLATE_STRING = '"%s"'
TEMPLATE_HEX = "0x%06x"
# #####################################################
# Templates - model
# #####################################################
TEMPLATE_FILE_ASCII = """\
{
"metadata" :
{
"formatVersion" : 3.1,
"generatedBy" : "Blender 2.7 Exporter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : [%(nuvs)s],
"materials" : %(nmaterial)d,
"morphTargets" : %(nmorphTarget)d,
"bones" : %(nbone)d
},
%(model)s
}
"""
TEMPLATE_MODEL_ASCII = """\
"scale" : %(scale)f,
"materials" : [%(materials)s],
"vertices" : [%(vertices)s],
"morphTargets" : [%(morphTargets)s],
"normals" : [%(normals)s],
"colors" : [%(colors)s],
"uvs" : [%(uvs)s],
"faces" : [%(faces)s],
"bones" : [%(bones)s],
"skinIndices" : [%(indices)s],
"skinWeights" : [%(weights)s],
"animations" : [%(animations)s]
"""
TEMPLATE_VERTEX = "%g,%g,%g"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%g,%g,%g"
TEMPLATE_UV = "%g,%g"
TEMPLATE_C = "%d"
# #####################################################
# Utils
# #####################################################
def veckey3(x,y,z):
return round(x, 6), round(y, 6), round(z, 6)
def veckey3d(v):
return veckey3(v.x, v.y, v.z)
def veckey2d(v):
return round(v[0], 6), round(v[1], 6)
def get_faces(obj):
if hasattr(obj, "tessfaces"):
return obj.tessfaces
else:
return obj.faces
def get_normal_indices(v, normals, mesh):
n = []
mv = mesh.vertices
for i in v:
normal = mv[i].normal
key = veckey3d(normal)
n.append( normals[key] )
return n
def get_uv_indices(face_index, uvs, mesh, layer_index):
uv = []
uv_layer = mesh.tessface_uv_textures[layer_index].data
for i in uv_layer[face_index].uv:
uv.append( uvs[veckey2d(i)] )
return uv
def get_color_indices(face_index, colors, mesh):
c = []
color_layer = mesh.tessface_vertex_colors.active.data
face_colors = color_layer[face_index]
face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
for i in face_colors:
c.append( colors[hexcolor(i)] )
return c
def rgb2int(rgb):
color = (int(rgb[0]*255) << 16) + (int(rgb[1]*255) << 8) + int(rgb[2]*255);
return color
# #####################################################
# Utils - files
# #####################################################
def write_file(fname, content):
out = open(fname, "w", encoding="utf-8")
out.write(content)
out.close()
def ensure_folder_exist(foldername):
"""Create folder (with whole path) if it doesn't exist yet."""
if not os.access(foldername, os.R_OK|os.W_OK|os.X_OK):
os.makedirs(foldername)
def ensure_extension(filepath, extension):
if not filepath.lower().endswith(extension):
filepath += extension
return filepath
def generate_mesh_filename(meshname, filepath):
normpath = os.path.normpath(filepath)
path, ext = os.path.splitext(normpath)
return "%s.%s%s" % (path, meshname, ext)
# #####################################################
# Utils - alignment
# #####################################################
def bbox(vertices):
"""Compute bounding box of vertex array.
"""
if len(vertices)>0:
minx = maxx = vertices[0].co.x
miny = maxy = vertices[0].co.y
minz = maxz = vertices[0].co.z
for v in vertices[1:]:
if v.co.x < minx:
minx = v.co.x
elif v.co.x > maxx:
maxx = v.co.x
if v.co.y < miny:
miny = v.co.y
elif v.co.y > maxy:
maxy = v.co.y
if v.co.z < minz:
minz = v.co.z
elif v.co.z > maxz:
maxz = v.co.z
return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
else:
return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
def translate(vertices, t):
"""Translate array of vertices by vector t.
"""
for i in range(len(vertices)):
vertices[i].co.x += t[0]
vertices[i].co.y += t[1]
vertices[i].co.z += t[2]
def center(vertices):
"""Center model (middle of bounding box).
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
return [-cx,-cy,-cz]
def top(vertices):
"""Align top of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][1]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
return [-cx,-cy,-cz]
def bottom(vertices):
"""Align bottom of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
return [-cx,-cy,-cz]
# #####################################################
# Elements rendering
# #####################################################
def hexcolor(c):
return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
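# Illustrative example: hexcolor((1.0, 0.5, 0.25)) == 0xff7f3f (16744255)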
def generate_vertices(vertices, option_vertices_truncate, option_vertices):
if not option_vertices:
return ""
return ",".join(generate_vertex(v, option_vertices_truncate) for v in vertices)
def generate_vertex(v, option_vertices_truncate):
if not option_vertices_truncate:
return TEMPLATE_VERTEX % (v.co.x, v.co.y, v.co.z)
else:
return TEMPLATE_VERTEX_TRUNCATE % (v.co.x, v.co.y, v.co.z)
def generate_normal(n):
return TEMPLATE_N % (n[0], n[1], n[2])
def generate_vertex_color(c):
return TEMPLATE_C % c
def generate_uv(uv):
return TEMPLATE_UV % (uv[0], uv[1])
# #####################################################
# Model exporter - faces
# #####################################################
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
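# Illustrative examples: setBit(0, 3, True) == 8, setBit(42, 1, False) == 40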
def generate_faces(normals, uv_layers, colors, meshes, option_normals, option_colors, option_uv_coords, option_materials, option_faces):
if not option_faces:
return "", 0
vertex_offset = 0
material_offset = 0
chunks = []
for mesh, object in meshes:
vertexUV = len(mesh.uv_textures) > 0
vertexColors = len(mesh.vertex_colors) > 0
mesh_colors = option_colors and vertexColors
mesh_uvs = option_uv_coords and vertexUV
if vertexUV:
active_uv_layer = mesh.uv_textures.active
if not active_uv_layer:
mesh_extract_uvs = False
if vertexColors:
active_col_layer = mesh.vertex_colors.active
if not active_col_layer:
mesh_extract_colors = False
for i, f in enumerate(get_faces(mesh)):
face = generate_face(f, i, normals, uv_layers, colors, mesh, option_normals, mesh_colors, mesh_uvs, option_materials, vertex_offset, material_offset)
chunks.append(face)
vertex_offset += len(mesh.vertices)
material_count = len(mesh.materials)
if material_count == 0:
material_count = 1
material_offset += material_count
return ",".join(chunks), len(chunks)
def generate_face(f, faceIndex, normals, uv_layers, colors, mesh, option_normals, option_colors, option_uv_coords, option_materials, vertex_offset, material_offset):
isTriangle = ( len(f.vertices) == 3 )
if isTriangle:
nVertices = 3
else:
nVertices = 4
hasMaterial = option_materials
hasFaceUvs = False # not supported in Blender
hasFaceVertexUvs = option_uv_coords
hasFaceNormals = False # don't export any face normals (as they are computed in engine)
hasFaceVertexNormals = option_normals
hasFaceColors = False # not supported in Blender
hasFaceVertexColors = option_colors
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
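    # Worked example (illustrative): a triangle with a material, vertex UVs and
    # vertex normals (and no face UVs/normals/colors) gives
    #   faceType = 0b00101010 = 42,
    # matching the bit layout read back by the importer's extract_faces().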
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face color index
# face vertex colors indices
faceData.append(faceType)
    # must clamp in case of polygons bigger than quads
for i in range(nVertices):
index = f.vertices[i] + vertex_offset
faceData.append(index)
if hasMaterial:
index = f.material_index + material_offset
faceData.append( index )
if hasFaceVertexUvs:
for layer_index, uvs in enumerate(uv_layers):
uv = get_uv_indices(faceIndex, uvs, mesh, layer_index)
for i in range(nVertices):
index = uv[i]
faceData.append(index)
if hasFaceVertexNormals:
n = get_normal_indices(f.vertices, normals, mesh)
for i in range(nVertices):
index = n[i]
faceData.append(index)
if hasFaceVertexColors:
c = get_color_indices(faceIndex, colors, mesh)
for i in range(nVertices):
index = c[i]
faceData.append(index)
return ",".join( map(str, faceData) )
# #####################################################
# Model exporter - normals
# #####################################################
def extract_vertex_normals(mesh, normals, count):
for f in get_faces(mesh):
for v in f.vertices:
normal = mesh.vertices[v].normal
key = veckey3d(normal)
if key not in normals:
normals[key] = count
count += 1
return count
def generate_normals(normals, option_normals):
if not option_normals:
return ""
chunks = []
for key, index in sorted(normals.items(), key = operator.itemgetter(1)):
chunks.append(key)
return ",".join(generate_normal(n) for n in chunks)
# #####################################################
# Model exporter - vertex colors
# #####################################################
def extract_vertex_colors(mesh, colors, count):
color_layer = mesh.tessface_vertex_colors.active.data
for face_index, face in enumerate(get_faces(mesh)):
face_colors = color_layer[face_index]
face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
for c in face_colors:
key = hexcolor(c)
if key not in colors:
colors[key] = count
count += 1
return count
def generate_vertex_colors(colors, option_colors):
if not option_colors:
return ""
chunks = []
for key, index in sorted(colors.items(), key=operator.itemgetter(1)):
chunks.append(key)
return ",".join(generate_vertex_color(c) for c in chunks)
# #####################################################
# Model exporter - UVs
# #####################################################
def extract_uvs(mesh, uv_layers, counts):
for index, layer in enumerate(mesh.tessface_uv_textures):
if len(uv_layers) <= index:
uvs = {}
count = 0
uv_layers.append(uvs)
counts.append(count)
else:
uvs = uv_layers[index]
count = counts[index]
uv_layer = layer.data
for face_index, face in enumerate(get_faces(mesh)):
for uv_index, uv in enumerate(uv_layer[face_index].uv):
key = veckey2d(uv)
if key not in uvs:
uvs[key] = count
count += 1
counts[index] = count
return counts
def generate_uvs(uv_layers, option_uv_coords):
if not option_uv_coords:
return "[]"
layers = []
for uvs in uv_layers:
chunks = []
for key, index in sorted(uvs.items(), key=operator.itemgetter(1)):
chunks.append(key)
layer = ",".join(generate_uv(n) for n in chunks)
layers.append(layer)
return ",".join("[%s]" % n for n in layers)
# ##############################################################################
# Model exporter - armature
# (only the first armature will exported)
# ##############################################################################
def get_armature():
if len(bpy.data.armatures) == 0:
print("Warning: no armatures in the scene")
return None, None
armature = bpy.data.armatures[0]
# Someone please figure out a proper way to get the armature node
for object in bpy.data.objects:
if object.type == 'ARMATURE':
return armature, object
print("Warning: no node of type 'ARMATURE' in the scene")
return None, None
# ##############################################################################
# Model exporter - bones
# (only the first armature will exported)
# ##############################################################################
def generate_bones(meshes, option_bones, flipyz):
if not option_bones:
return "", 0
armature, armature_object = get_armature()
if armature_object is None:
return "", 0
hierarchy = []
armature_matrix = armature_object.matrix_world
pose_bones = armature_object.pose.bones
#pose_bones = armature.bones
TEMPLATE_BONE = '{"parent":%d,"name":"%s","pos":[%g,%g,%g],"rotq":[%g,%g,%g,%g],"scl":[%g,%g,%g]}'
for pose_bone in pose_bones:
armature_bone = pose_bone.bone
#armature_bone = pose_bone
bonePos = armature_matrix * armature_bone.head_local
boneIndex = None
if armature_bone.parent is None:
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_index = -1
else:
parent_matrix = armature_matrix * armature_bone.parent.matrix_local
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_matrix = parent_matrix.inverted() * bone_matrix
bone_index = i = 0
for pose_parent in pose_bones:
armature_parent = pose_parent.bone
#armature_parent = pose_parent
if armature_parent.name == armature_bone.parent.name:
bone_index = i
i += 1
pos, rot, scl = bone_matrix.decompose()
if flipyz:
joint = TEMPLATE_BONE % (bone_index, armature_bone.name, pos.x, pos.z, -pos.y, rot.x, rot.z, -rot.y, rot.w, scl.x, scl.z, scl.y)
hierarchy.append(joint)
else:
joint = TEMPLATE_BONE % (bone_index, armature_bone.name, pos.x, pos.y, pos.z, rot.x, rot.y, rot.z, rot.w, scl.x, scl.y, scl.z)
hierarchy.append(joint)
bones_string = ",".join(hierarchy)
return bones_string, len(pose_bones)
# ##############################################################################
# Model exporter - skin indices and weights
# ##############################################################################
def generate_indices_and_weights(meshes, option_skinning):
if not option_skinning or len(bpy.data.armatures) == 0:
return "", ""
indices = []
weights = []
armature, armature_object = get_armature()
for mesh, object in meshes:
i = 0
mesh_index = -1
# find the original object
for obj in bpy.data.objects:
if obj.name == mesh.name or obj == object:
mesh_index = i
i += 1
if mesh_index == -1:
print("generate_indices: couldn't find object for mesh", mesh.name)
continue
object = bpy.data.objects[mesh_index]
for vertex in mesh.vertices:
# sort bones by influence
bone_array = []
for group in vertex.groups:
index = group.group
weight = group.weight
bone_array.append( (index, weight) )
bone_array.sort(key = operator.itemgetter(1), reverse=True)
# select first N bones
for i in range(MAX_INFLUENCES):
if i < len(bone_array):
bone_proxy = bone_array[i]
found = 0
index = bone_proxy[0]
weight = bone_proxy[1]
for j, bone in enumerate(armature_object.pose.bones):
if object.vertex_groups[index].name == bone.name:
indices.append('%d' % j)
weights.append('%g' % weight)
found = 1
break
if found != 1:
indices.append('0')
weights.append('0')
else:
indices.append('0')
weights.append('0')
indices_string = ",".join(indices)
weights_string = ",".join(weights)
return indices_string, weights_string
# ##############################################################################
# Model exporter - skeletal animation
# (only the first action will exported)
# ##############################################################################
def generate_animation(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time, index):
if not option_animation_skeletal or len(bpy.data.actions) == 0:
return ""
# TODO: Add scaling influences
action = bpy.data.actions[index]
# get current context and then switch to dopesheet temporarily
current_context = bpy.context.area.type
bpy.context.area.type = "DOPESHEET_EDITOR"
bpy.context.space_data.mode = "ACTION"
# set active action
bpy.context.area.spaces.active.action = action
armature, armature_object = get_armature()
if armature_object is None or armature is None:
return "", 0
#armature_object = bpy.data.objects['marine_rig']
armature_matrix = armature_object.matrix_world
fps = bpy.data.scenes[0].render.fps
end_frame = action.frame_range[1]
start_frame = action.frame_range[0]
frame_length = end_frame - start_frame
used_frames = int(frame_length / option_frame_step) + 1
TEMPLATE_KEYFRAME_FULL = '{"time":%g,"pos":[%g,%g,%g],"rot":[%g,%g,%g,%g],"scl":[%g,%g,%g]}'
TEMPLATE_KEYFRAME_BEGIN = '{"time":%g'
TEMPLATE_KEYFRAME_END = '}'
TEMPLATE_KEYFRAME_POS = ',"pos":[%g,%g,%g]'
TEMPLATE_KEYFRAME_ROT = ',"rot":[%g,%g,%g,%g]'
TEMPLATE_KEYFRAME_SCL = ',"scl":[%g,%g,%g]'
keys = []
channels_location = []
channels_rotation = []
channels_scale = []
# Precompute per-bone data
for pose_bone in armature_object.pose.bones:
armature_bone = pose_bone.bone
keys.append([])
channels_location.append( find_channels(action, armature_bone, "location"))
channels_rotation.append( find_channels(action, armature_bone, "rotation_quaternion"))
channels_rotation.append( find_channels(action, armature_bone, "rotation_euler"))
channels_scale.append( find_channels(action, armature_bone, "scale"))
# Process all frames
for frame_i in range(0, used_frames):
#print("Processing frame %d/%d" % (frame_i, used_frames))
# Compute the index of the current frame (snap the last index to the end)
frame = start_frame + frame_i * option_frame_step
if frame_i == used_frames-1:
frame = end_frame
# Compute the time of the frame
if option_frame_index_as_time:
time = frame - start_frame
else:
time = (frame - start_frame) / fps
# Let blender compute the pose bone transformations
bpy.data.scenes[0].frame_set(frame)
# Process all bones for the current frame
bone_index = 0
for pose_bone in armature_object.pose.bones:
# Extract the bone transformations
if pose_bone.parent is None:
bone_matrix = armature_matrix * pose_bone.matrix
else:
parent_matrix = armature_matrix * pose_bone.parent.matrix
bone_matrix = armature_matrix * pose_bone.matrix
bone_matrix = parent_matrix.inverted() * bone_matrix
pos, rot, scl = bone_matrix.decompose()
pchange = True or has_keyframe_at(channels_location[bone_index], frame)
rchange = True or has_keyframe_at(channels_rotation[bone_index], frame)
schange = True or has_keyframe_at(channels_scale[bone_index], frame)
if flipyz:
px, py, pz = pos.x, pos.z, -pos.y
rx, ry, rz, rw = rot.x, rot.z, -rot.y, rot.w
sx, sy, sz = scl.x, scl.z, scl.y
else:
px, py, pz = pos.x, pos.y, pos.z
rx, ry, rz, rw = rot.x, rot.y, rot.z, rot.w
sx, sy, sz = scl.x, scl.y, scl.z
# START-FRAME: needs pos, rot and scl attributes (required frame)
if frame == start_frame:
keyframe = TEMPLATE_KEYFRAME_FULL % (time, px, py, pz, rx, ry, rz, rw, sx, sy, sz)
keys[bone_index].append(keyframe)
# END-FRAME: needs pos, rot and scl attributes with animation length (required frame)
elif frame == end_frame:
keyframe = TEMPLATE_KEYFRAME_FULL % (time, px, py, pz, rx, ry, rz, rw, sx, sy, sz)
keys[bone_index].append(keyframe)
# MIDDLE-FRAME: needs only one of the attributes, can be an empty frame (optional frame)
elif pchange == True or rchange == True:
keyframe = TEMPLATE_KEYFRAME_BEGIN % time
if pchange == True:
keyframe = keyframe + TEMPLATE_KEYFRAME_POS % (px, py, pz)
if rchange == True:
keyframe = keyframe + TEMPLATE_KEYFRAME_ROT % (rx, ry, rz, rw)
if schange == True:
keyframe = keyframe + TEMPLATE_KEYFRAME_SCL % (sx, sy, sz)
keyframe = keyframe + TEMPLATE_KEYFRAME_END
keys[bone_index].append(keyframe)
bone_index += 1
# Gather data
parents = []
bone_index = 0
for pose_bone in armature_object.pose.bones:
keys_string = ",".join(keys[bone_index])
parent_index = bone_index - 1 # WTF? Also, this property is not used by three.js
parent = '{"parent":%d,"keys":[%s]}' % (parent_index, keys_string)
bone_index += 1
parents.append(parent)
hierarchy_string = ",".join(parents)
if option_frame_index_as_time:
length = frame_length
else:
length = frame_length / fps
animation_string = '"name":"%s","fps":%d,"length":%g,"hierarchy":[%s]' % (action.name, fps, length, hierarchy_string)
bpy.data.scenes[0].frame_set(start_frame)
# reset context
bpy.context.area.type = current_context
return animation_string
def find_channels(action, bone, channel_type):
bone_name = bone.name
ngroups = len(action.groups)
result = []
# Variant 1: channels grouped by bone names
if ngroups > 0:
# Find the channel group for the given bone
group_index = -1
for i in range(ngroups):
if action.groups[i].name == bone_name:
group_index = i
# Get all desired channels in that group
if group_index > -1:
for channel in action.groups[group_index].channels:
if channel_type in channel.data_path:
result.append(channel)
# Variant 2: no channel groups, bone names included in channel names
else:
bone_label = '"%s"' % bone_name
for channel in action.fcurves:
data_path = channel.data_path
if bone_label in data_path and channel_type in data_path:
result.append(channel)
return result
def find_keyframe_at(channel, frame):
for keyframe in channel.keyframe_points:
if keyframe.co[0] == frame:
return keyframe
return None
def has_keyframe_at(channels, frame):
for channel in channels:
if not find_keyframe_at(channel, frame) is None:
return True
return False
def generate_all_animations(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time):
all_animations_string = ""
if option_animation_skeletal:
for index in range(0, len(bpy.data.actions)):
if index != 0 :
all_animations_string += ", \n"
all_animations_string += "{" + generate_animation(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time,index) + "}"
return all_animations_string
def handle_position_channel(channel, frame, position):
change = False
if channel.array_index in [0, 1, 2]:
for keyframe in channel.keyframe_points:
if keyframe.co[0] == frame:
change = True
value = channel.evaluate(frame)
if channel.array_index == 0:
position.x = value
if channel.array_index == 1:
position.y = value
if channel.array_index == 2:
position.z = value
return change
def position(bone, frame, action, armatureMatrix):
position = mathutils.Vector((0,0,0))
change = False
ngroups = len(action.groups)
if ngroups > 0:
index = 0
for i in range(ngroups):
if action.groups[i].name == bone.name:
index = i
for channel in action.groups[index].channels:
if "location" in channel.data_path:
hasChanged = handle_position_channel(channel, frame, position)
change = change or hasChanged
else:
bone_label = '"%s"' % bone.name
for channel in action.fcurves:
data_path = channel.data_path
if bone_label in data_path and "location" in data_path:
hasChanged = handle_position_channel(channel, frame, position)
change = change or hasChanged
position = position * bone.matrix_local.inverted()
if bone.parent == None:
position.x += bone.head.x
position.y += bone.head.y
position.z += bone.head.z
else:
parent = bone.parent
parentInvertedLocalMatrix = parent.matrix_local.inverted()
parentHeadTailDiff = parent.tail_local - parent.head_local
position.x += (bone.head * parentInvertedLocalMatrix).x + parentHeadTailDiff.x
position.y += (bone.head * parentInvertedLocalMatrix).y + parentHeadTailDiff.y
position.z += (bone.head * parentInvertedLocalMatrix).z + parentHeadTailDiff.z
return armatureMatrix*position, change
def handle_rotation_channel(channel, frame, rotation):
change = False
if channel.array_index in [0, 1, 2, 3]:
for keyframe in channel.keyframe_points:
if keyframe.co[0] == frame:
change = True
value = channel.evaluate(frame)
if channel.array_index == 1:
rotation.x = value
elif channel.array_index == 2:
rotation.y = value
elif channel.array_index == 3:
rotation.z = value
elif channel.array_index == 0:
rotation.w = value
return change
def rotation(bone, frame, action, armatureMatrix):
# TODO: calculate rotation also from rotation_euler channels
rotation = mathutils.Vector((0,0,0,1))
change = False
ngroups = len(action.groups)
# animation grouped by bones
if ngroups > 0:
index = -1
for i in range(ngroups):
if action.groups[i].name == bone.name:
index = i
if index > -1:
for channel in action.groups[index].channels:
if "quaternion" in channel.data_path:
hasChanged = handle_rotation_channel(channel, frame, rotation)
change = change or hasChanged
# animation in raw fcurves
else:
bone_label = '"%s"' % bone.name
for channel in action.fcurves:
data_path = channel.data_path
if bone_label in data_path and "quaternion" in data_path:
hasChanged = handle_rotation_channel(channel, frame, rotation)
change = change or hasChanged
rot3 = rotation.to_3d()
rotation.xyz = rot3 * bone.matrix_local.inverted()
rotation.xyz = armatureMatrix * rotation.xyz
return rotation, change
# #####################################################
# Model exporter - materials
# #####################################################
def generate_color(i):
"""Generate hex color corresponding to integer.
Colors should have well defined ordering.
First N colors are hardcoded, then colors are random
(must seed random number generator with deterministic value
before getting colors).
"""
if i < len(COLORS):
#return "0x%06x" % COLORS[i]
return COLORS[i]
else:
#return "0x%06x" % int(0xffffff * random.random())
return int(0xffffff * random.random())
def generate_mtl(materials):
"""Generate dummy materials.
"""
mtl = {}
for m in materials:
index = materials[m]
mtl[m] = {
"DbgName": m,
"DbgIndex": index,
"DbgColor": generate_color(index),
"vertexColors" : False
}
return mtl
def value2string(v):
if type(v) == str and v[0:2] != "0x":
return '"%s"' % v
elif type(v) == bool:
return str(v).lower()
elif type(v) == list:
return "[%s]" % (", ".join(value2string(x) for x in v))
return str(v)
def generate_materials(mtl, materials, draw_type):
"""Generate JS array of materials objects
"""
mtl_array = []
for m in mtl:
index = materials[m]
# add debug information
        #  materials should be sorted according to how
        #  they first appeared in the OBJ file;
        #  this index is the identifier used in face definitions
mtl[m]['DbgName'] = m
mtl[m]['DbgIndex'] = index
mtl[m]['DbgColor'] = generate_color(index)
if draw_type in [ "BOUNDS", "WIRE" ]:
mtl[m]['wireframe'] = True
mtl[m]['DbgColor'] = 0xff0000
mtl_raw = ",\n".join(['\t\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
mtl_string = "\t{\n%s\n\t}" % mtl_raw
mtl_array.append([index, mtl_string])
return ",\n\n".join([m for i,m in sorted(mtl_array)]), len(mtl_array)
def extract_materials(mesh, scene, option_colors, option_copy_textures, filepath):
world = scene.world
materials = {}
for m in mesh.materials:
if m:
materials[m.name] = {}
material = materials[m.name]
material['colorDiffuse'] = [m.diffuse_intensity * m.diffuse_color[0],
m.diffuse_intensity * m.diffuse_color[1],
m.diffuse_intensity * m.diffuse_color[2]]
material['colorSpecular'] = [m.specular_intensity * m.specular_color[0],
m.specular_intensity * m.specular_color[1],
m.specular_intensity * m.specular_color[2]]
material['colorAmbient'] = [m.ambient * material['colorDiffuse'][0],
m.ambient * material['colorDiffuse'][1],
m.ambient * material['colorDiffuse'][2]]
material['colorEmissive'] = [m.emit * material['colorDiffuse'][0],
m.emit * material['colorDiffuse'][1],
m.emit * material['colorDiffuse'][2]]
material['transparency'] = m.alpha
# not sure about mapping values to Blinn-Phong shader
# Blender uses INT from [1, 511] with default 0
# http://www.blender.org/documentation/blender_python_api_2_54_0/bpy.types.Material.html#bpy.types.Material.specular_hardness
material["specularCoef"] = m.specular_hardness
textures = guess_material_textures(m)
handle_texture('diffuse', textures, material, filepath, option_copy_textures)
handle_texture('light', textures, material, filepath, option_copy_textures)
handle_texture('normal', textures, material, filepath, option_copy_textures)
handle_texture('specular', textures, material, filepath, option_copy_textures)
handle_texture('bump', textures, material, filepath, option_copy_textures)
material["vertexColors"] = m.THREE_useVertexColors and option_colors
# can't really use this reliably to tell apart Phong from Lambert
# as Blender defaults to non-zero specular color
#if m.specular_intensity > 0.0 and (m.specular_color[0] > 0 or m.specular_color[1] > 0 or m.specular_color[2] > 0):
# material['shading'] = "Phong"
#else:
# material['shading'] = "Lambert"
if textures['normal']:
material['shading'] = "Phong"
else:
material['shading'] = m.THREE_materialType
material['blending'] = m.THREE_blendingType
material['depthWrite'] = m.THREE_depthWrite
material['depthTest'] = m.THREE_depthTest
material['transparent'] = m.use_transparency
return materials
def generate_materials_string(mesh, scene, option_colors, draw_type, option_copy_textures, filepath, offset):
random.seed(42) # to get well defined color order for debug materials
materials = {}
if mesh.materials:
for i, m in enumerate(mesh.materials):
mat_id = i + offset
if m:
materials[m.name] = mat_id
else:
materials["undefined_dummy_%0d" % mat_id] = mat_id
if not materials:
materials = { 'default': 0 }
# default dummy materials
mtl = generate_mtl(materials)
# extract real materials from the mesh
mtl.update(extract_materials(mesh, scene, option_colors, option_copy_textures, filepath))
return generate_materials(mtl, materials, draw_type)
def handle_texture(id, textures, material, filepath, option_copy_textures):
if textures[id] and textures[id]['texture'].users > 0 and len(textures[id]['texture'].users_material) > 0:
texName = 'map%s' % id.capitalize()
repeatName = 'map%sRepeat' % id.capitalize()
wrapName = 'map%sWrap' % id.capitalize()
slot = textures[id]['slot']
texture = textures[id]['texture']
image = texture.image
fname = extract_texture_filename(image)
material[texName] = fname
if option_copy_textures:
save_image(image, fname, filepath)
if texture.repeat_x != 1 or texture.repeat_y != 1:
material[repeatName] = [texture.repeat_x, texture.repeat_y]
if texture.extension == "REPEAT":
wrap_x = "repeat"
wrap_y = "repeat"
if texture.use_mirror_x:
wrap_x = "mirror"
if texture.use_mirror_y:
wrap_y = "mirror"
material[wrapName] = [wrap_x, wrap_y]
if slot.use_map_normal:
if slot.normal_factor != 1.0:
if id == "bump":
material['mapBumpScale'] = slot.normal_factor
else:
material['mapNormalFactor'] = slot.normal_factor
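# Illustrative sketch of the keys written above for a hypothetical diffuse slot
# with a non-default repeat and a mirrored REPEAT extension:
#   material["mapDiffuse"]       = "wood.jpg"
#   material["mapDiffuseRepeat"] = [2, 2]
#   material["mapDiffuseWrap"]   = ["mirror", "repeat"]
# For "bump" / "normal" slots with use_map_normal set and a non-default factor,
# mapBumpScale or mapNormalFactor is written as well.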
# #####################################################
# ASCII model generator
# #####################################################
def generate_ascii_model(meshes, morphs,
scene,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
flipyz,
option_scale,
option_copy_textures,
filepath,
option_animation_morph,
option_animation_skeletal,
option_frame_index_as_time,
option_frame_step):
vertices = []
vertex_offset = 0
vertex_offsets = []
nnormal = 0
normals = {}
ncolor = 0
colors = {}
nuvs = []
uv_layers = []
nmaterial = 0
materials = []
for mesh, object in meshes:
vertexUV = len(mesh.uv_textures) > 0
vertexColors = len(mesh.vertex_colors) > 0
mesh_extract_colors = option_colors and vertexColors
mesh_extract_uvs = option_uv_coords and vertexUV
if vertexUV:
active_uv_layer = mesh.uv_textures.active
if not active_uv_layer:
mesh_extract_uvs = False
if vertexColors:
active_col_layer = mesh.vertex_colors.active
if not active_col_layer:
mesh_extract_colors = False
vertex_offsets.append(vertex_offset)
vertex_offset += len(vertices)
vertices.extend(mesh.vertices[:])
if option_normals:
nnormal = extract_vertex_normals(mesh, normals, nnormal)
if mesh_extract_colors:
ncolor = extract_vertex_colors(mesh, colors, ncolor)
if mesh_extract_uvs:
nuvs = extract_uvs(mesh, uv_layers, nuvs)
if option_materials:
mesh_materials, nmaterial = generate_materials_string(mesh, scene, mesh_extract_colors, object.draw_type, option_copy_textures, filepath, nmaterial)
materials.append(mesh_materials)
morphTargets_string = ""
nmorphTarget = 0
if option_animation_morph:
chunks = []
for i, morphVertices in enumerate(morphs):
morphTarget = '{ "name": "%s_%06d", "vertices": [%s] }' % ("animation", i, morphVertices)
chunks.append(morphTarget)
morphTargets_string = ",\n\t".join(chunks)
nmorphTarget = len(morphs)
if align_model == 1:
center(vertices)
elif align_model == 2:
bottom(vertices)
elif align_model == 3:
top(vertices)
faces_string, nfaces = generate_faces(normals, uv_layers, colors, meshes, option_normals, option_colors, option_uv_coords, option_materials, option_faces)
bones_string, nbone = generate_bones(meshes, option_bones, flipyz)
indices_string, weights_string = generate_indices_and_weights(meshes, option_skinning)
materials_string = ",\n\n".join(materials)
model_string = TEMPLATE_MODEL_ASCII % {
"scale" : option_scale,
"uvs" : generate_uvs(uv_layers, option_uv_coords),
"normals" : generate_normals(normals, option_normals),
"colors" : generate_vertex_colors(colors, option_colors),
"materials" : materials_string,
"vertices" : generate_vertices(vertices, option_vertices_truncate, option_vertices),
"faces" : faces_string,
"morphTargets" : morphTargets_string,
"bones" : bones_string,
"indices" : indices_string,
"weights" : weights_string,
"animations" : generate_all_animations(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time)
}
text = TEMPLATE_FILE_ASCII % {
"nvertex" : len(vertices),
"nface" : nfaces,
"nuvs" : ",".join("%d" % n for n in nuvs),
"nnormal" : nnormal,
"ncolor" : ncolor,
"nmaterial" : nmaterial,
"nmorphTarget": nmorphTarget,
"nbone" : nbone,
"model" : model_string
}
return text, model_string
# #####################################################
# Model exporter - export single mesh
# #####################################################
def extract_meshes(objects, scene, export_single_model, option_scale, flipyz):
meshes = []
for object in objects:
if object.type == "MESH" and object.THREE_exportGeometry:
# collapse modifiers into mesh
mesh = object.to_mesh(scene, True, 'RENDER')
if not mesh:
raise Exception("Error, could not get mesh data from object [%s]" % object.name)
# preserve original name
mesh.name = object.name
if export_single_model:
if flipyz:
# that's what Blender's native export_obj.py does to flip YZ
X_ROT = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X')
mesh.transform(X_ROT * object.matrix_world)
else:
mesh.transform(object.matrix_world)
mesh.update(calc_tessface=True)
mesh.calc_normals()
mesh.calc_tessface()
mesh.transform(mathutils.Matrix.Scale(option_scale, 4))
meshes.append([mesh, object])
return meshes
def generate_mesh_string(objects, scene,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
flipyz,
option_scale,
export_single_model,
option_copy_textures,
filepath,
option_animation_morph,
option_animation_skeletal,
option_frame_index_as_time,
option_frame_step):
meshes = extract_meshes(objects, scene, export_single_model, option_scale, flipyz)
morphs = []
if option_animation_morph:
original_frame = scene.frame_current # save animation state
scene_frames = range(scene.frame_start, scene.frame_end + 1, option_frame_step)
for index, frame in enumerate(scene_frames):
scene.frame_set(frame, 0.0)
anim_meshes = extract_meshes(objects, scene, export_single_model, option_scale, flipyz)
frame_vertices = []
for mesh, object in anim_meshes:
frame_vertices.extend(mesh.vertices[:])
if index == 0:
if align_model == 1:
offset = center(frame_vertices)
elif align_model == 2:
offset = bottom(frame_vertices)
elif align_model == 3:
offset = top(frame_vertices)
else:
offset = False
else:
if offset:
translate(frame_vertices, offset)
morphVertices = generate_vertices(frame_vertices, option_vertices_truncate, option_vertices)
morphs.append(morphVertices)
# remove temp meshes
for mesh, object in anim_meshes:
bpy.data.meshes.remove(mesh)
scene.frame_set(original_frame, 0.0) # restore animation state
text, model_string = generate_ascii_model(meshes, morphs,
scene,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
flipyz,
option_scale,
option_copy_textures,
filepath,
option_animation_morph,
option_animation_skeletal,
option_frame_index_as_time,
option_frame_step)
# remove temp meshes
for mesh, object in meshes:
bpy.data.meshes.remove(mesh)
return text, model_string
def export_mesh(objects,
scene, filepath,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
flipyz,
option_scale,
export_single_model,
option_copy_textures,
option_animation_morph,
option_animation_skeletal,
option_frame_step,
option_frame_index_as_time):
"""Export single mesh"""
text, model_string = generate_mesh_string(objects,
scene,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
flipyz,
option_scale,
export_single_model,
option_copy_textures,
filepath,
option_animation_morph,
option_animation_skeletal,
option_frame_index_as_time,
option_frame_step)
write_file(filepath, text)
print("writing", filepath, "done")
# #####################################################
# Scene exporter - render elements
# #####################################################
def generate_quat(quat):
return TEMPLATE_VEC4 % (quat.x, quat.y, quat.z, quat.w)
def generate_vec4(vec):
return TEMPLATE_VEC4 % (vec[0], vec[1], vec[2], vec[3])
def generate_vec3(vec, flipyz = False):
if flipyz:
return TEMPLATE_VEC3 % (vec[0], vec[2], vec[1])
return TEMPLATE_VEC3 % (vec[0], vec[1], vec[2])
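# Illustrative: flipyz swaps the Y and Z components, so a Blender (x, y, z) vector
# is written in (x, z, y) order, e.g.
#   generate_vec3([1.0, 2.0, 3.0])               -> TEMPLATE_VEC3 % (1.0, 2.0, 3.0)
#   generate_vec3([1.0, 2.0, 3.0], flipyz=True)  -> TEMPLATE_VEC3 % (1.0, 3.0, 2.0)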
def generate_vec2(vec):
return TEMPLATE_VEC2 % (vec[0], vec[1])
def generate_hex(number):
return TEMPLATE_HEX % number
def generate_string(s):
return TEMPLATE_STRING % s
def generate_string_list(src_list):
return ", ".join(generate_string(item) for item in src_list)
def generate_section(label, content):
return TEMPLATE_SECTION % (label, content)
def get_mesh_filename(mesh):
object_id = mesh["data"]["name"]
filename = "%s.js" % sanitize(object_id)
return filename
def generate_material_id_list(materials):
chunks = []
for material in materials:
chunks.append(material.name)
return chunks
def generate_group_id_list(obj):
chunks = []
for group in bpy.data.groups:
if obj.name in group.objects:
chunks.append(group.name)
return chunks
def generate_bool_property(property):
if property:
return "true"
return "false"
# #####################################################
# Scene exporter - objects
# #####################################################
def generate_objects(data):
chunks = []
for obj in data["objects"]:
if obj.type == "MESH" and obj.THREE_exportGeometry:
object_id = obj.name
#if len(obj.modifiers) > 0:
# geo_name = obj.name
#else:
geo_name = obj.data.name
geometry_id = "geo_%s" % geo_name
material_ids = generate_material_id_list(obj.material_slots)
group_ids = generate_group_id_list(obj)
if data["flipyz"]:
matrix_world = ROTATE_X_PI2 * obj.matrix_world
else:
matrix_world = obj.matrix_world
position, quaternion, scale = matrix_world.decompose()
rotation = quaternion.to_euler("ZYX")
# use empty material string for multi-material objects
# this will trigger use of MeshFaceMaterial in SceneLoader
material_string = '""'
if len(material_ids) == 1:
material_string = generate_string_list(material_ids)
group_string = ""
if len(group_ids) > 0:
group_string = generate_string_list(group_ids)
castShadow = obj.THREE_castShadow
receiveShadow = obj.THREE_receiveShadow
doubleSided = obj.THREE_doubleSided
visible = obj.THREE_visible
geometry_string = generate_string(geometry_id)
object_string = TEMPLATE_OBJECT % {
"object_id" : generate_string(object_id),
"geometry_id" : geometry_string,
"group_id" : group_string,
"material_id" : material_string,
"position" : generate_vec3(position),
"rotation" : generate_vec3(rotation),
"quaternion" : generate_quat(quaternion),
"scale" : generate_vec3(scale),
"castShadow" : generate_bool_property(castShadow),
"receiveShadow" : generate_bool_property(receiveShadow),
"doubleSided" : generate_bool_property(doubleSided),
"visible" : generate_bool_property(visible)
}
chunks.append(object_string)
elif obj.type == "EMPTY" or (obj.type == "MESH" and not obj.THREE_exportGeometry):
object_id = obj.name
group_ids = generate_group_id_list(obj)
if data["flipyz"]:
matrix_world = ROTATE_X_PI2 * obj.matrix_world
else:
matrix_world = obj.matrix_world
position, quaternion, scale = matrix_world.decompose()
rotation = quaternion.to_euler("ZYX")
group_string = ""
if len(group_ids) > 0:
group_string = generate_string_list(group_ids)
object_string = TEMPLATE_EMPTY % {
"object_id" : generate_string(object_id),
"group_id" : group_string,
"position" : generate_vec3(position),
"rotation" : generate_vec3(rotation),
"quaternion" : generate_quat(quaternion),
"scale" : generate_vec3(scale)
}
chunks.append(object_string)
return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - geometries
# #####################################################
def generate_geometries(data):
chunks = []
geo_set = set()
for obj in data["objects"]:
if obj.type == "MESH" and obj.THREE_exportGeometry:
#if len(obj.modifiers) > 0:
# name = obj.name
#else:
name = obj.data.name
if name not in geo_set:
geometry_id = "geo_%s" % name
if data["embed_meshes"]:
embed_id = "emb_%s" % name
geometry_string = TEMPLATE_GEOMETRY_EMBED % {
"geometry_id" : generate_string(geometry_id),
"embed_id" : generate_string(embed_id)
}
else:
model_filename = os.path.basename(generate_mesh_filename(name, data["filepath"]))
geometry_string = TEMPLATE_GEOMETRY_LINK % {
"geometry_id" : generate_string(geometry_id),
"model_file" : generate_string(model_filename)
}
chunks.append(geometry_string)
geo_set.add(name)
return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - textures
# #####################################################
def generate_textures_scene(data):
chunks = []
# TODO: extract just textures actually used by some objects in the scene
for texture in bpy.data.textures:
if texture.type == 'IMAGE' and texture.image and texture.users > 0 and len(texture.users_material) > 0:
img = texture.image
texture_id = img.name
texture_file = extract_texture_filename(img)
if data["copy_textures"]:
save_image(img, texture_file, data["filepath"])
extras = ""
if texture.repeat_x != 1 or texture.repeat_y != 1:
extras += ',\n "repeat": [%g, %g]' % (texture.repeat_x, texture.repeat_y)
if texture.extension == "REPEAT":
wrap_x = "repeat"
wrap_y = "repeat"
if texture.use_mirror_x:
wrap_x = "mirror"
if texture.use_mirror_y:
wrap_y = "mirror"
extras += ',\n "wrap": ["%s", "%s"]' % (wrap_x, wrap_y)
texture_string = TEMPLATE_TEXTURE % {
"texture_id" : generate_string(texture_id),
"texture_file" : generate_string(texture_file),
"extras" : extras
}
chunks.append(texture_string)
return ",\n\n".join(chunks), len(chunks)
def extract_texture_filename(image):
fn = bpy.path.abspath(image.filepath)
fn = os.path.normpath(fn)
fn_strip = os.path.basename(fn)
return fn_strip
def save_image(img, name, fpath):
dst_dir = os.path.dirname(fpath)
dst_path = os.path.join(dst_dir, name)
ensure_folder_exist(dst_dir)
if img.packed_file:
img.save_render(dst_path)
else:
src_path = bpy.path.abspath(img.filepath)
shutil.copy(src_path, dst_dir)
# #####################################################
# Scene exporter - materials
# #####################################################
def extract_material_data(m, option_colors):
world = bpy.context.scene.world
material = { 'name': m.name }
material['colorDiffuse'] = [m.diffuse_intensity * m.diffuse_color[0],
m.diffuse_intensity * m.diffuse_color[1],
m.diffuse_intensity * m.diffuse_color[2]]
material['colorSpecular'] = [m.specular_intensity * m.specular_color[0],
m.specular_intensity * m.specular_color[1],
m.specular_intensity * m.specular_color[2]]
material['colorAmbient'] = [m.ambient * material['colorDiffuse'][0],
m.ambient * material['colorDiffuse'][1],
m.ambient * material['colorDiffuse'][2]]
material['colorEmissive'] = [m.emit * material['colorDiffuse'][0],
m.emit * material['colorDiffuse'][1],
m.emit * material['colorDiffuse'][2]]
material['transparency'] = m.alpha
# not sure about mapping values to Blinn-Phong shader
# Blender uses INT from [1,511] with default 0
# http://www.blender.org/documentation/blender_python_api_2_54_0/bpy.types.Material.html#bpy.types.Material.specular_hardness
material["specularCoef"] = m.specular_hardness
material["vertexColors"] = m.THREE_useVertexColors and option_colors
material['mapDiffuse'] = ""
material['mapLight'] = ""
material['mapSpecular'] = ""
material['mapNormal'] = ""
material['mapBump'] = ""
material['mapNormalFactor'] = 1.0
material['mapBumpScale'] = 1.0
textures = guess_material_textures(m)
if textures['diffuse']:
material['mapDiffuse'] = textures['diffuse']['texture'].image.name
if textures['light']:
material['mapLight'] = textures['light']['texture'].image.name
if textures['specular']:
material['mapSpecular'] = textures['specular']['texture'].image.name
if textures['normal']:
material['mapNormal'] = textures['normal']['texture'].image.name
if textures['normal']['slot'].use_map_normal:
material['mapNormalFactor'] = textures['normal']['slot'].normal_factor
if textures['bump']:
material['mapBump'] = textures['bump']['texture'].image.name
if textures['bump']['slot'].use_map_normal:
material['mapBumpScale'] = textures['bump']['slot'].normal_factor
material['shading'] = m.THREE_materialType
material['blending'] = m.THREE_blendingType
material['depthWrite'] = m.THREE_depthWrite
material['depthTest'] = m.THREE_depthTest
material['transparent'] = m.use_transparency
return material
def guess_material_textures(material):
textures = {
'diffuse' : None,
'light' : None,
'normal' : None,
'specular': None,
'bump' : None
}
    # only take the first texture of each kind; for the moment three.js materials can't handle more
    # assume the diffuse map comes before the lightmap, and that the normal map has its flag checked
for i in range(len(material.texture_slots)):
slot = material.texture_slots[i]
if slot:
texture = slot.texture
if slot.use and texture and texture.type == 'IMAGE':
# normal map in Blender UI: textures => image sampling => normal map
if texture.use_normal_map:
textures['normal'] = { "texture": texture, "slot": slot }
# bump map in Blender UI: textures => influence => geometry => normal
elif slot.use_map_normal:
textures['bump'] = { "texture": texture, "slot": slot }
elif slot.use_map_specular or slot.use_map_hardness:
textures['specular'] = { "texture": texture, "slot": slot }
else:
if not textures['diffuse'] and not slot.blend_type == 'MULTIPLY':
textures['diffuse'] = { "texture": texture, "slot": slot }
else:
textures['light'] = { "texture": texture, "slot": slot }
if textures['diffuse'] and textures['normal'] and textures['light'] and textures['specular'] and textures['bump']:
break
return textures
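# Illustrative shape of the returned mapping (entries are None when no matching
# slot is found):
#   {
#       "diffuse":  {"texture": <bpy.types.ImageTexture>, "slot": <bpy.types.MaterialTextureSlot>},
#       "light":    None,
#       "normal":   None,
#       "specular": None,
#       "bump":     None,
#   }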
def generate_material_string(material):
material_id = material["name"]
# default to Lambert
shading = material.get("shading", "Lambert")
# normal and bump mapped materials must use Phong
# to get all required parameters for normal shader
if material['mapNormal'] or material['mapBump']:
shading = "Phong"
type_map = {
"Lambert" : "MeshLambertMaterial",
"Phong" : "MeshPhongMaterial"
}
material_type = type_map.get(shading, "MeshBasicMaterial")
parameters = '"color": %d' % rgb2int(material["colorDiffuse"])
parameters += ', "ambient": %d' % rgb2int(material["colorDiffuse"])
parameters += ', "emissive": %d' % rgb2int(material["colorEmissive"])
parameters += ', "opacity": %.2g' % material["transparency"]
if shading == "Phong":
parameters += ', "ambient": %d' % rgb2int(material["colorAmbient"])
parameters += ', "emissive": %d' % rgb2int(material["colorEmissive"])
parameters += ', "specular": %d' % rgb2int(material["colorSpecular"])
parameters += ', "shininess": %.1g' % material["specularCoef"]
colorMap = material['mapDiffuse']
lightMap = material['mapLight']
specularMap = material['mapSpecular']
normalMap = material['mapNormal']
bumpMap = material['mapBump']
normalMapFactor = material['mapNormalFactor']
bumpMapScale = material['mapBumpScale']
if colorMap:
parameters += ', "map": %s' % generate_string(colorMap)
if lightMap:
parameters += ', "lightMap": %s' % generate_string(lightMap)
if specularMap:
parameters += ', "specularMap": %s' % generate_string(specularMap)
if normalMap:
parameters += ', "normalMap": %s' % generate_string(normalMap)
if bumpMap:
parameters += ', "bumpMap": %s' % generate_string(bumpMap)
if normalMapFactor != 1.0:
parameters += ', "normalMapFactor": %g' % normalMapFactor
if bumpMapScale != 1.0:
parameters += ', "bumpMapScale": %g' % bumpMapScale
if material['vertexColors']:
parameters += ', "vertexColors": "vertex"'
if material['transparent']:
parameters += ', "transparent": true'
parameters += ', "blending": "%s"' % material['blending']
if not material['depthWrite']:
parameters += ', "depthWrite": false'
if not material['depthTest']:
parameters += ', "depthTest": false'
material_string = TEMPLATE_MATERIAL_SCENE % {
"material_id" : generate_string(material_id),
"type" : generate_string(material_type),
"parameters" : parameters
}
return material_string
def generate_materials_scene(data):
chunks = []
def material_is_used(mat):
minimum_users = 1
if mat.use_fake_user:
minimum_users = 2 #we must ignore the "fake user" in this case
return mat.users >= minimum_users
used_materials = [m for m in bpy.data.materials if material_is_used(m)]
for m in used_materials:
material = extract_material_data(m, data["use_colors"])
material_string = generate_material_string(material)
chunks.append(material_string)
return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - cameras
# #####################################################
def generate_cameras(data):
chunks = []
if data["use_cameras"]:
cams = bpy.data.objects
cams = [ob for ob in cams if (ob.type == 'CAMERA')]
if not cams:
camera = DEFAULTS["camera"]
if camera["type"] == "PerspectiveCamera":
camera_string = TEMPLATE_CAMERA_PERSPECTIVE % {
"camera_id" : generate_string(camera["name"]),
"fov" : camera["fov"],
"aspect" : camera["aspect"],
"near" : camera["near"],
"far" : camera["far"],
"position" : generate_vec3(camera["position"]),
"target" : generate_vec3(camera["target"])
}
elif camera["type"] == "OrthographicCamera":
camera_string = TEMPLATE_CAMERA_ORTHO % {
"camera_id" : generate_string(camera["name"]),
"left" : camera["left"],
"right" : camera["right"],
"top" : camera["top"],
"bottom" : camera["bottom"],
"near" : camera["near"],
"far" : camera["far"],
"position" : generate_vec3(camera["position"]),
"target" : generate_vec3(camera["target"])
}
chunks.append(camera_string)
else:
for cameraobj in cams:
camera = bpy.data.cameras[cameraobj.data.name]
if camera.id_data.type == "PERSP":
camera_string = TEMPLATE_CAMERA_PERSPECTIVE % {
"camera_id" : generate_string(cameraobj.name),
"fov" : (camera.angle / 3.14) * 180.0,
"aspect" : 1.333,
"near" : camera.clip_start,
"far" : camera.clip_end,
"position" : generate_vec3([cameraobj.location[0], -cameraobj.location[1], cameraobj.location[2]], data["flipyz"]),
"target" : generate_vec3([0, 0, 0])
}
elif camera.id_data.type == "ORTHO":
camera_string = TEMPLATE_CAMERA_ORTHO % {
"camera_id" : generate_string(camera.name),
"left" : -(camera.angle_x * camera.ortho_scale),
"right" : (camera.angle_x * camera.ortho_scale),
"top" : (camera.angle_y * camera.ortho_scale),
"bottom" : -(camera.angle_y * camera.ortho_scale),
"near" : camera.clip_start,
"far" : camera.clip_end,
"position" : generate_vec3([cameraobj.location[0], -cameraobj.location[1], cameraobj.location[2]], data["flipyz"]),
"target" : generate_vec3([0, 0, 0])
}
chunks.append(camera_string)
return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - lights
# #####################################################
def generate_lights(data):
chunks = []
if data["use_lights"]:
lamps = data["objects"]
lamps = [ob for ob in lamps if (ob.type == 'LAMP')]
for lamp in lamps:
light_string = ""
concrete_lamp = lamp.data
if concrete_lamp.type == "POINT":
light_string = TEMPLATE_LIGHT_POINT % {
"light_id" : generate_string(concrete_lamp.name),
"position" : generate_vec3(lamp.location, data["flipyz"]),
"rotation" : generate_vec3(lamp.rotation_euler, data["flipyz"]),
"color" : rgb2int(concrete_lamp.color),
"distance" : concrete_lamp.distance,
"intensity" : concrete_lamp.energy
}
elif concrete_lamp.type == "SUN":
light_string = TEMPLATE_LIGHT_SUN % {
"light_id" : generate_string(concrete_lamp.name),
"position" : generate_vec3(lamp.location, data["flipyz"]),
"rotation" : generate_vec3(lamp.rotation_euler, data["flipyz"]),
"color" : rgb2int(concrete_lamp.color),
"distance" : concrete_lamp.distance,
"intensity" : concrete_lamp.energy
}
elif concrete_lamp.type == "SPOT":
light_string = TEMPLATE_LIGHT_SPOT % {
"light_id" : generate_string(concrete_lamp.name),
"position" : generate_vec3(lamp.location, data["flipyz"]),
"rotation" : generate_vec3(lamp.rotation_euler, data["flipyz"]),
"color" : rgb2int(concrete_lamp.color),
"distance" : concrete_lamp.distance,
"intensity" : concrete_lamp.energy,
"use_shadow" : concrete_lamp.use_shadow,
"angle" : concrete_lamp.spot_size
}
elif concrete_lamp.type == "HEMI":
light_string = TEMPLATE_LIGHT_HEMI % {
"light_id" : generate_string(concrete_lamp.name),
"position" : generate_vec3(lamp.location, data["flipyz"]),
"rotation" : generate_vec3(lamp.rotation_euler, data["flipyz"]),
"color" : rgb2int(concrete_lamp.color),
"distance" : concrete_lamp.distance,
"intensity" : concrete_lamp.energy
}
elif concrete_lamp.type == "AREA":
light_string = TEMPLATE_LIGHT_AREA % {
"light_id" : generate_string(concrete_lamp.name),
"position" : generate_vec3(lamp.location, data["flipyz"]),
"rotation" : generate_vec3(lamp.rotation_euler, data["flipyz"]),
"color" : rgb2int(concrete_lamp.color),
"distance" : concrete_lamp.distance,
"intensity" : concrete_lamp.energy,
"gamma" : concrete_lamp.gamma,
"shape" : concrete_lamp.shape,
"size" : concrete_lamp.size,
"size_y" : concrete_lamp.size_y
}
chunks.append(light_string)
if not lamps:
lamps.append(DEFAULTS["light"])
return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - embedded meshes
# #####################################################
def generate_embeds(data):
if data["embed_meshes"]:
chunks = []
for e in data["embeds"]:
embed = '"emb_%s": {%s}' % (e, data["embeds"][e])
chunks.append(embed)
return ",\n\n".join(chunks)
return ""
# #####################################################
# Scene exporter - generate ASCII scene
# #####################################################
def generate_ascii_scene(data):
objects, nobjects = generate_objects(data)
geometries, ngeometries = generate_geometries(data)
textures, ntextures = generate_textures_scene(data)
materials, nmaterials = generate_materials_scene(data)
lights, nlights = generate_lights(data)
cameras, ncameras = generate_cameras(data)
embeds = generate_embeds(data)
if nlights > 0:
if nobjects > 0:
objects = objects + ",\n\n" + lights
else:
objects = lights
nobjects += nlights
if ncameras > 0:
if nobjects > 0:
objects = objects + ",\n\n" + cameras
else:
objects = cameras
nobjects += ncameras
basetype = "relativeTo"
if data["base_html"]:
basetype += "HTML"
else:
basetype += "Scene"
sections = [
["objects", objects],
["geometries", geometries],
["textures", textures],
["materials", materials],
["embeds", embeds]
]
chunks = []
for label, content in sections:
if content:
chunks.append(generate_section(label, content))
sections_string = "\n".join(chunks)
default_camera = ""
if data["use_cameras"]:
cams = [ob for ob in bpy.data.objects if (ob.type == 'CAMERA' and ob.select)]
if not cams:
default_camera = "default_camera"
else:
default_camera = cams[0].name
parameters = {
"fname" : data["source_file"],
"sections" : sections_string,
"bgcolor" : generate_vec3(DEFAULTS["bgcolor"]),
"bgalpha" : DEFAULTS["bgalpha"],
"defcamera" : generate_string(default_camera),
"nobjects" : nobjects,
"ngeometries" : ngeometries,
"ntextures" : ntextures,
"basetype" : generate_string(basetype),
"nmaterials" : nmaterials,
"position" : generate_vec3(DEFAULTS["position"]),
"rotation" : generate_vec3(DEFAULTS["rotation"]),
"scale" : generate_vec3(DEFAULTS["scale"])
}
text = TEMPLATE_SCENE_ASCII % parameters
return text
def export_scene(scene, filepath, flipyz, option_colors, option_lights, option_cameras, option_embed_meshes, embeds, option_url_base_html, option_copy_textures):
source_file = os.path.basename(bpy.data.filepath)
# objects are contained in scene and linked groups
objects = []
# get scene objects
sceneobjects = scene.objects
for obj in sceneobjects:
objects.append(obj)
scene_text = ""
data = {
"scene" : scene,
"objects" : objects,
"embeds" : embeds,
"source_file" : source_file,
"filepath" : filepath,
"flipyz" : flipyz,
"use_colors" : option_colors,
"use_lights" : option_lights,
"use_cameras" : option_cameras,
"embed_meshes" : option_embed_meshes,
"base_html" : option_url_base_html,
"copy_textures": option_copy_textures
}
scene_text += generate_ascii_scene(data)
write_file(filepath, scene_text)
# #####################################################
# Main
# #####################################################
def save(operator, context, filepath = "",
option_flip_yz = True,
option_vertices = True,
option_vertices_truncate = False,
option_faces = True,
option_normals = True,
option_uv_coords = True,
option_materials = True,
option_colors = True,
option_bones = True,
option_skinning = True,
align_model = 0,
option_export_scene = False,
option_lights = False,
option_cameras = False,
option_scale = 1.0,
option_embed_meshes = True,
option_url_base_html = False,
option_copy_textures = False,
option_animation_morph = False,
option_animation_skeletal = False,
option_frame_step = 1,
option_all_meshes = True,
option_frame_index_as_time = False):
#print("URL TYPE", option_url_base_html)
filepath = ensure_extension(filepath, '.js')
scene = context.scene
if scene.objects.active:
bpy.ops.object.mode_set(mode='OBJECT')
if option_all_meshes:
sceneobjects = scene.objects
else:
sceneobjects = context.selected_objects
# objects are contained in scene and linked groups
objects = []
# get scene objects
for obj in sceneobjects:
objects.append(obj)
if option_export_scene:
geo_set = set()
embeds = {}
for object in objects:
if object.type == "MESH" and object.THREE_exportGeometry:
# create extra copy of geometry with applied modifiers
# (if they exist)
#if len(object.modifiers) > 0:
# name = object.name
# otherwise can share geometry
#else:
name = object.data.name
if name not in geo_set:
if option_embed_meshes:
text, model_string = generate_mesh_string([object], scene,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
False, # align_model
option_flip_yz,
option_scale,
False, # export_single_model
False, # option_copy_textures
filepath,
option_animation_morph,
option_animation_skeletal,
option_frame_index_as_time,
option_frame_step)
embeds[object.data.name] = model_string
else:
fname = generate_mesh_filename(name, filepath)
export_mesh([object], scene,
fname,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
False, # align_model
option_flip_yz,
option_scale,
False, # export_single_model
option_copy_textures,
option_animation_morph,
option_animation_skeletal,
option_frame_step,
option_frame_index_as_time)
geo_set.add(name)
export_scene(scene, filepath,
option_flip_yz,
option_colors,
option_lights,
option_cameras,
option_embed_meshes,
embeds,
option_url_base_html,
option_copy_textures)
else:
export_mesh(objects, scene, filepath,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
option_flip_yz,
option_scale,
True, # export_single_model
option_copy_textures,
option_animation_morph,
option_animation_skeletal,
option_frame_step,
option_frame_index_as_time)
return {'FINISHED'} | three.js-master | utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/export_threejs.py |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# ################################################################
# Init
# ################################################################
bl_info = {
"name": "three.js format",
"author": "mrdoob, kikko, alteredq, remoe, pxf, n3tfr34k, crobi",
"version": (1, 5, 0),
"blender": (2, 7, 0),
"location": "File > Import-Export",
"description": "Import-Export three.js meshes",
"warning": "",
"wiki_url": "https://github.com/mrdoob/three.js/tree/master/utils/exporters/blender",
"tracker_url": "https://github.com/mrdoob/three.js/issues",
"category": "Import-Export"}
# To support reload properly, try to access a package var;
# if it's there, reload everything.
import bpy
if "bpy" in locals():
import imp
if "export_threejs" in locals():
imp.reload(export_threejs)
if "import_threejs" in locals():
imp.reload(import_threejs)
from bpy.props import *
from bpy_extras.io_utils import ExportHelper, ImportHelper
# ################################################################
# Custom properties
# ################################################################
bpy.types.Object.THREE_castShadow = bpy.props.BoolProperty()
bpy.types.Object.THREE_receiveShadow = bpy.props.BoolProperty()
bpy.types.Object.THREE_doubleSided = bpy.props.BoolProperty()
bpy.types.Object.THREE_exportGeometry = bpy.props.BoolProperty(default = True)
bpy.types.Object.THREE_visible = bpy.props.BoolProperty(default = True)
bpy.types.Material.THREE_useVertexColors = bpy.props.BoolProperty()
bpy.types.Material.THREE_depthWrite = bpy.props.BoolProperty(default = True)
bpy.types.Material.THREE_depthTest = bpy.props.BoolProperty(default = True)
THREE_material_types = [("Basic", "Basic", "Basic"), ("Phong", "Phong", "Phong"), ("Lambert", "Lambert", "Lambert")]
bpy.types.Material.THREE_materialType = EnumProperty(name = "Material type", description = "Material type", items = THREE_material_types, default = "Lambert")
THREE_blending_types = [("NoBlending", "NoBlending", "NoBlending"), ("NormalBlending", "NormalBlending", "NormalBlending"),
("AdditiveBlending", "AdditiveBlending", "AdditiveBlending"), ("SubtractiveBlending", "SubtractiveBlending", "SubtractiveBlending"),
("MultiplyBlending", "MultiplyBlending", "MultiplyBlending"), ("AdditiveAlphaBlending", "AdditiveAlphaBlending", "AdditiveAlphaBlending")]
bpy.types.Material.THREE_blendingType = EnumProperty(name = "Blending type", description = "Blending type", items = THREE_blending_types, default = "NormalBlending")
class OBJECT_PT_hello( bpy.types.Panel ):
bl_label = "THREE"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "object"
def draw(self, context):
layout = self.layout
obj = context.object
row = layout.row()
row.label(text="Selected object: " + obj.name )
row = layout.row()
row.prop( obj, "THREE_exportGeometry", text="Export geometry" )
row = layout.row()
row.prop( obj, "THREE_castShadow", text="Casts shadow" )
row = layout.row()
row.prop( obj, "THREE_receiveShadow", text="Receives shadow" )
row = layout.row()
row.prop( obj, "THREE_doubleSided", text="Double sided" )
row = layout.row()
row.prop( obj, "THREE_visible", text="Visible" )
class MATERIAL_PT_hello( bpy.types.Panel ):
bl_label = "THREE"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "material"
def draw(self, context):
layout = self.layout
mat = context.material
row = layout.row()
row.label(text="Selected material: " + mat.name )
row = layout.row()
row.prop( mat, "THREE_materialType", text="Material type" )
row = layout.row()
row.prop( mat, "THREE_blendingType", text="Blending type" )
row = layout.row()
row.prop( mat, "THREE_useVertexColors", text="Use vertex colors" )
row = layout.row()
row.prop( mat, "THREE_depthWrite", text="Enable depth writing" )
row = layout.row()
row.prop( mat, "THREE_depthTest", text="Enable depth testing" )
# ################################################################
# Importer
# ################################################################
class ImportTHREEJS(bpy.types.Operator, ImportHelper):
'''Load a Three.js ASCII JSON model'''
bl_idname = "import.threejs"
bl_label = "Import Three.js"
filename_ext = ".js"
filter_glob = StringProperty(default="*.js", options={'HIDDEN'})
option_flip_yz = BoolProperty(name="Flip YZ", description="Flip YZ", default=True)
recalculate_normals = BoolProperty(name="Recalculate normals", description="Recalculate vertex normals", default=True)
option_worker = BoolProperty(name="Worker", description="Old format using workers", default=False)
def execute(self, context):
import io_mesh_threejs.import_threejs
return io_mesh_threejs.import_threejs.load(self, context, **self.properties)
def draw(self, context):
layout = self.layout
row = layout.row()
row.prop(self.properties, "option_flip_yz")
row = layout.row()
row.prop(self.properties, "recalculate_normals")
row = layout.row()
row.prop(self.properties, "option_worker")
# ################################################################
# Exporter - settings
# ################################################################
SETTINGS_FILE_EXPORT = "threejs_settings_export.js"
import os
import json
def file_exists(filename):
"""Return true if file exists and accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
def get_settings_fullpath():
return os.path.join(bpy.app.tempdir, SETTINGS_FILE_EXPORT)
def save_settings_export(properties):
settings = {
"option_export_scene" : properties.option_export_scene,
"option_embed_meshes" : properties.option_embed_meshes,
"option_url_base_html" : properties.option_url_base_html,
"option_copy_textures" : properties.option_copy_textures,
"option_lights" : properties.option_lights,
"option_cameras" : properties.option_cameras,
"option_animation_morph" : properties.option_animation_morph,
"option_animation_skeletal" : properties.option_animation_skeletal,
"option_frame_index_as_time" : properties.option_frame_index_as_time,
"option_frame_step" : properties.option_frame_step,
"option_all_meshes" : properties.option_all_meshes,
"option_flip_yz" : properties.option_flip_yz,
"option_materials" : properties.option_materials,
"option_normals" : properties.option_normals,
"option_colors" : properties.option_colors,
"option_uv_coords" : properties.option_uv_coords,
"option_faces" : properties.option_faces,
"option_vertices" : properties.option_vertices,
"option_skinning" : properties.option_skinning,
"option_bones" : properties.option_bones,
"option_vertices_truncate" : properties.option_vertices_truncate,
"option_scale" : properties.option_scale,
"align_model" : properties.align_model
}
    fname = get_settings_fullpath()
    with open(fname, "w") as f:
        json.dump(settings, f)
def restore_settings_export(properties):
settings = {}
fname = get_settings_fullpath()
if file_exists(fname):
f = open(fname, "r")
settings = json.load(f)
properties.option_vertices = settings.get("option_vertices", True)
properties.option_vertices_truncate = settings.get("option_vertices_truncate", False)
properties.option_faces = settings.get("option_faces", True)
properties.option_normals = settings.get("option_normals", True)
properties.option_colors = settings.get("option_colors", True)
properties.option_uv_coords = settings.get("option_uv_coords", True)
properties.option_materials = settings.get("option_materials", True)
properties.option_skinning = settings.get("option_skinning", True)
properties.option_bones = settings.get("option_bones", True)
properties.align_model = settings.get("align_model", "None")
properties.option_scale = settings.get("option_scale", 1.0)
properties.option_flip_yz = settings.get("option_flip_yz", True)
properties.option_export_scene = settings.get("option_export_scene", False)
properties.option_embed_meshes = settings.get("option_embed_meshes", True)
properties.option_url_base_html = settings.get("option_url_base_html", False)
properties.option_copy_textures = settings.get("option_copy_textures", False)
properties.option_lights = settings.get("option_lights", False)
properties.option_cameras = settings.get("option_cameras", False)
properties.option_animation_morph = settings.get("option_animation_morph", False)
properties.option_animation_skeletal = settings.get("option_animation_skeletal", False)
properties.option_frame_index_as_time = settings.get("option_frame_index_as_time", False)
properties.option_frame_step = settings.get("option_frame_step", 1)
properties.option_all_meshes = settings.get("option_all_meshes", True)
# ################################################################
# Exporter
# ################################################################
class ExportTHREEJS(bpy.types.Operator, ExportHelper):
'''Export selected object / scene for Three.js (ASCII JSON format).'''
bl_idname = "export.threejs"
bl_label = "Export Three.js"
filename_ext = ".js"
option_vertices = BoolProperty(name = "Vertices", description = "Export vertices", default = True)
option_vertices_deltas = BoolProperty(name = "Deltas", description = "Delta vertices", default = False)
option_vertices_truncate = BoolProperty(name = "Truncate", description = "Truncate vertices", default = False)
option_faces = BoolProperty(name = "Faces", description = "Export faces", default = True)
option_faces_deltas = BoolProperty(name = "Deltas", description = "Delta faces", default = False)
option_normals = BoolProperty(name = "Normals", description = "Export normals", default = True)
option_colors = BoolProperty(name = "Colors", description = "Export vertex colors", default = True)
option_uv_coords = BoolProperty(name = "UVs", description = "Export texture coordinates", default = True)
option_materials = BoolProperty(name = "Materials", description = "Export materials", default = True)
option_skinning = BoolProperty(name = "Skinning", description = "Export skin data", default = True)
option_bones = BoolProperty(name = "Bones", description = "Export bones", default = True)
align_types = [("None","None","None"), ("Center","Center","Center"), ("Bottom","Bottom","Bottom"), ("Top","Top","Top")]
align_model = EnumProperty(name = "Align model", description = "Align model", items = align_types, default = "None")
option_scale = FloatProperty(name = "Scale", description = "Scale vertices", min = 0.01, max = 1000.0, soft_min = 0.01, soft_max = 1000.0, default = 1.0)
option_flip_yz = BoolProperty(name = "Flip YZ", description = "Flip YZ", default = True)
option_export_scene = BoolProperty(name = "Scene", description = "Export scene", default = False)
option_embed_meshes = BoolProperty(name = "Embed meshes", description = "Embed meshes", default = True)
option_copy_textures = BoolProperty(name = "Copy textures", description = "Copy textures", default = False)
option_url_base_html = BoolProperty(name = "HTML as url base", description = "Use HTML as url base ", default = False)
option_lights = BoolProperty(name = "Lights", description = "Export default scene lights", default = False)
option_cameras = BoolProperty(name = "Cameras", description = "Export default scene cameras", default = False)
option_animation_morph = BoolProperty(name = "Morph animation", description = "Export animation (morphs)", default = False)
option_animation_skeletal = BoolProperty(name = "Skeletal animation", description = "Export animation (skeletal)", default = False)
option_frame_index_as_time = BoolProperty(name = "Frame index as time", description = "Use (original) frame index as frame time", default = False)
option_frame_step = IntProperty(name = "Frame step", description = "Animation frame step", min = 1, max = 1000, soft_min = 1, soft_max = 1000, default = 1)
option_all_meshes = BoolProperty(name = "All meshes", description = "All meshes (merged)", default = True)
def invoke(self, context, event):
restore_settings_export(self.properties)
return ExportHelper.invoke(self, context, event)
@classmethod
def poll(cls, context):
return context.active_object != None
def execute(self, context):
print("Selected: " + context.active_object.name)
if not self.properties.filepath:
raise Exception("filename not set")
save_settings_export(self.properties)
filepath = self.filepath
import io_mesh_threejs.export_threejs
return io_mesh_threejs.export_threejs.save(self, context, **self.properties)
def draw(self, context):
layout = self.layout
row = layout.row()
row.label(text="Geometry:")
row = layout.row()
row.prop(self.properties, "option_vertices")
# row = layout.row()
# row.enabled = self.properties.option_vertices
# row.prop(self.properties, "option_vertices_deltas")
row.prop(self.properties, "option_vertices_truncate")
layout.separator()
row = layout.row()
row.prop(self.properties, "option_faces")
row = layout.row()
row.enabled = self.properties.option_faces
# row.prop(self.properties, "option_faces_deltas")
layout.separator()
row = layout.row()
row.prop(self.properties, "option_normals")
layout.separator()
row = layout.row()
row.prop(self.properties, "option_bones")
row.prop(self.properties, "option_skinning")
layout.separator()
row = layout.row()
row.label(text="Materials:")
row = layout.row()
row.prop(self.properties, "option_uv_coords")
row.prop(self.properties, "option_colors")
row = layout.row()
row.prop(self.properties, "option_materials")
layout.separator()
row = layout.row()
row.label(text="Settings:")
row = layout.row()
row.prop(self.properties, "align_model")
row = layout.row()
row.prop(self.properties, "option_flip_yz")
row.prop(self.properties, "option_scale")
layout.separator()
row = layout.row()
row.label(text="--------- Experimental ---------")
layout.separator()
row = layout.row()
row.label(text="Scene:")
row = layout.row()
row.prop(self.properties, "option_export_scene")
row.prop(self.properties, "option_embed_meshes")
row = layout.row()
row.prop(self.properties, "option_lights")
row.prop(self.properties, "option_cameras")
layout.separator()
row = layout.row()
row.label(text="Animation:")
row = layout.row()
row.prop(self.properties, "option_animation_morph")
row = layout.row()
row.prop(self.properties, "option_animation_skeletal")
row = layout.row()
row.prop(self.properties, "option_frame_index_as_time")
row = layout.row()
row.prop(self.properties, "option_frame_step")
layout.separator()
row = layout.row()
row.label(text="Settings:")
row = layout.row()
row.prop(self.properties, "option_all_meshes")
row = layout.row()
row.prop(self.properties, "option_copy_textures")
row = layout.row()
row.prop(self.properties, "option_url_base_html")
layout.separator()
# ################################################################
# Common
# ################################################################
def menu_func_export(self, context):
default_path = bpy.data.filepath.replace(".blend", ".js")
self.layout.operator(ExportTHREEJS.bl_idname, text="Three.js (.js)").filepath = default_path
def menu_func_import(self, context):
self.layout.operator(ImportTHREEJS.bl_idname, text="Three.js (.js)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func_export)
bpy.types.INFO_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
if __name__ == "__main__":
register() | three.js-master | utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/__init__.py |
#!/usr/bin/env python
import sys
if sys.version_info < (2, 7):
print("This script requires at least Python 2.7.")
print("Please, update to a newer version: http://www.python.org/download/releases/")
exit()
import argparse
import json
import os
import re
import shutil
import tempfile
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('--include', action='append', required=True)
parser.add_argument('--output', default='sublimetext2/threejs.sublime-completions')
args = parser.parse_args()
output = args.output
# parsing
print(' * Generating ' + output)
fd, path = tempfile.mkstemp()
tmp = open(path, 'w')
tmp.write('{\n\t"scope": "source.js,source.js.embedded.html,source.coffee",\n\t"version": "r55",\n\t"completions":\n\t[\n')
for include in args.include:
with open('../build/includes/' + include + '.json','r') as f: files = json.load(f)
for filename in files:
filename = '../../' + filename;
with open(filename, 'r') as f:
string = f.read()
match = re.search('THREE.(\w+)[\ ]+?=[\ ]+?function[\ ]+\(([\w\,\ ]+)?\)', string)
if match:
name = match.group(1)
parameters = match.group(2)
if parameters is None:
parameters = ''
else:
array = parameters.split( ',' )
for i in range(len(array)):
array[i] = '${'+str(i+1)+':'+array[i].strip()+'}' # ${1:param}
parameters = ' '+', '.join(array)+' '
tmp.write('\t\t{ "trigger": "THREE.'+name+'", "contents": "THREE.'+name+'('+parameters+')$0" },\n' )
tmp.write("\t\t\"THREE\"\n\t]\n}")
tmp.close()
# save
shutil.copy(path, output)
os.chmod(output, 0o664); # temp files would usually get 0600
if __name__ == "__main__":
main()
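# Illustrative: for a constructor declared in the three.js sources as
#   THREE.Vector3 = function ( x, y, z ) { ... }
# the loop above emits a completion entry like
#   { "trigger": "THREE.Vector3", "contents": "THREE.Vector3( ${1:x}, ${2:y}, ${3:z} )$0" },
# where ${n:param} are Sublime Text snippet placeholders.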
| three.js-master | utils/editors/sublime.py |
"""Convert Wavefront OBJ / MTL files into Three.js (JSON model version, to be used with ascii / binary loader)
-------------------------
How to use this converter
-------------------------
python convert_obj_three.py -i infile.obj -o outfile.js [-m "morphfiles*.obj"] [-c "morphcolors*.obj"] [-a center|centerxz|top|bottom|none] [-s smooth|flat] [-t ascii|binary] [-d invert|normal] [-b] [-e]
Notes:
- flags
-i infile.obj input OBJ file
-o outfile.js output JS file
-m "morphfiles*.obj" morph OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-c "morphcolors*.obj" morph colors OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-a center|centerxz|top|bottom|none model alignment
-s smooth|flat smooth = export vertex normals, flat = no normals (face normals computed in loader)
-t ascii|binary export ascii or binary format (ascii has more features, binary just supports vertices, faces, normals, uvs and materials)
-d invert|normal invert transparency
-b bake material colors into face colors
-x 10.0 scale and truncate
-f 2 morph frame sampling step
- by default:
use smooth shading (if there were vertex normals in the original model)
will be in ASCII format
original model is assumed to use non-inverted transparency / dissolve (0.0 fully transparent, 1.0 fully opaque)
no face colors baking
no scale and truncate
morph frame step = 1 (all files will be processed)
- binary conversion will create two files:
outfile.js (materials)
outfile.bin (binary buffers)
--------------------------------------------------
How to use generated JS file in your HTML document
--------------------------------------------------
<script type="text/javascript" src="Three.js"></script>
...
<script type="text/javascript">
...
// load ascii model
var jsonLoader = new THREE.JSONLoader();
jsonLoader.load( "Model_ascii.js", createScene );
// load binary model
var binLoader = new THREE.BinaryLoader();
binLoader.load( "Model_bin.js", createScene );
function createScene( geometry, materials ) {
var mesh = new THREE.Mesh( geometry, new THREE.MeshFaceMaterial( materials ) );
}
...
</script>
-------------------------------------
Parsers based on formats descriptions
-------------------------------------
http://en.wikipedia.org/wiki/Obj
http://en.wikipedia.org/wiki/Material_Template_Library
-------------------
Current limitations
-------------------
- for the moment, only diffuse color and texture are used
(will need to extend shaders / renderers / materials in Three)
- texture coordinates can be wrong in canvas renderer
(there is crude normalization, but it doesn't
work for all cases)
- smoothing can be turned on/off only for the whole mesh
----------------------------------------------
How to get proper OBJ + MTL files with Blender
----------------------------------------------
0. Remove default cube (press DEL and ENTER)
1. Import / create model
2. Select all meshes (Select -> Select All by Type -> Mesh)
3. Export to OBJ (File -> Export -> Wavefront .obj)
- enable following options in exporter
Material Groups
Rotate X90
Apply Modifiers
High Quality Normals
Copy Images
Selection Only
Objects as OBJ Objects
UVs
Normals
Materials
- select empty folder
- give your exported file name with "obj" extension
- click on "Export OBJ" button
4. Your model is now all files in this folder (OBJ, MTL, number of images)
    - this converter assumes all files stay in the same folder,
(OBJ / MTL files use relative paths)
- for WebGL, textures must be power of 2 sized
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
ALIGN = "none" # center centerxz bottom top none
SHADING = "smooth" # smooth flat
TYPE = "ascii" # ascii binary
TRANSPARENCY = "normal" # normal invert
TRUNCATE = False
SCALE = 1.0
FRAMESTEP = 1
BAKE_COLORS = False
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# #####################################################
# Templates
# #####################################################
TEMPLATE_FILE_ASCII = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"scale" : %(scale)f,
"materials": [%(materials)s],
"vertices": [%(vertices)s],
"morphTargets": [%(morphTargets)s],
"morphColors": [%(morphColors)s],
"normals": [%(normals)s],
"colors": [%(colors)s],
"uvs": [[%(uvs)s]],
"faces": [%(faces)s]
}
"""
TEMPLATE_FILE_BIN = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"materials": [%(materials)s],
"buffers": "%(buffers)s"
}
"""
TEMPLATE_VERTEX = "%f,%f,%f"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%.5g,%.5g,%.5g"
TEMPLATE_UV = "%.5g,%.5g"
TEMPLATE_COLOR = "%.3g,%.3g,%.3g"
TEMPLATE_COLOR_DEC = "%d"
TEMPLATE_MORPH_VERTICES = '\t{ "name": "%s", "vertices": [%s] }'
TEMPLATE_MORPH_COLORS = '\t{ "name": "%s", "colors": [%s] }'
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
def get_name(fname):
"""Create model name based of filename ("path/fname.js" -> "fname").
"""
return os.path.splitext(os.path.basename(fname))[0]
def bbox(vertices):
"""Compute bounding box of vertex array.
"""
if len(vertices)>0:
minx = maxx = vertices[0][0]
miny = maxy = vertices[0][1]
minz = maxz = vertices[0][2]
for v in vertices[1:]:
if v[0]<minx:
minx = v[0]
elif v[0]>maxx:
maxx = v[0]
if v[1]<miny:
miny = v[1]
elif v[1]>maxy:
maxy = v[1]
if v[2]<minz:
minz = v[2]
elif v[2]>maxz:
maxz = v[2]
return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
else:
return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
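# Illustrative example (not part of the converter): for a two-vertex array
# the per-axis extents come straight from the loop above.
#   bbox([[0, 0, 0], [2, 4, 6]])  # -> {'x': [0, 2], 'y': [0, 4], 'z': [0, 6]}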
def translate(vertices, t):
"""Translate array of vertices by vector t.
"""
for i in xrange(len(vertices)):
vertices[i][0] += t[0]
vertices[i][1] += t[1]
vertices[i][2] += t[2]
def center(vertices):
"""Center model (middle of bounding box).
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def top(vertices):
"""Align top of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][1]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def bottom(vertices):
"""Align bottom of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def centerxz(vertices):
"""Center model around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = 0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def normalize(v):
"""Normalize 3d vector"""
l = math.sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
if l:
v[0] /= l
v[1] /= l
v[2] /= l
def veckey3(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
# #####################################################
# MTL parser
# #####################################################
def texture_relative_path(fullpath):
texture_file = os.path.basename(fullpath.replace("\\", "/"))
return texture_file
def parse_mtl(fname):
"""Parse MTL file.
"""
materials = {}
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (e.g. "newmtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Material start
# newmtl identifier
if chunks[0] == "newmtl":
if len(chunks) > 1:
identifier = chunks[1]
else:
identifier = ""
if not identifier in materials:
materials[identifier] = {}
# Diffuse texture
# map_Kd texture_diffuse.jpg
if chunks[0] == "map_Kd" and len(chunks) == 2:
materials[identifier]["mapDiffuse"] = texture_relative_path(chunks[1])
# Ambient texture
# map_Ka texture_ambient.jpg
if chunks[0] == "map_Ka" and len(chunks) == 2:
materials[identifier]["mapAmbient"] = texture_relative_path(chunks[1])
# Specular texture
# map_Ks texture_specular.jpg
if chunks[0] == "map_Ks" and len(chunks) == 2:
materials[identifier]["mapSpecular"] = texture_relative_path(chunks[1])
# Alpha texture
# map_d texture_alpha.png
if chunks[0] == "map_d" and len(chunks) == 2:
materials[identifier]["transparent"] = True
materials[identifier]["mapAlpha"] = texture_relative_path(chunks[1])
# Bump texture
# map_bump texture_bump.jpg or bump texture_bump.jpg
if (chunks[0] == "map_bump" or chunks[0] == "bump") and len(chunks) == 2:
materials[identifier]["mapBump"] = texture_relative_path(chunks[1])
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Diffuse color
# Kd 1.000 1.000 1.000
if chunks[0] == "Kd" and len(chunks) == 4:
materials[identifier]["colorDiffuse"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Ambient color
# Ka 1.000 1.000 1.000
if chunks[0] == "Ka" and len(chunks) == 4:
materials[identifier]["colorAmbient"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular color
# Ks 1.000 1.000 1.000
if chunks[0] == "Ks" and len(chunks) == 4:
materials[identifier]["colorSpecular"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular coefficient
# Ns 154.000
if chunks[0] == "Ns" and len(chunks) == 2:
materials[identifier]["specularCoef"] = float(chunks[1])
# Transparency
# Tr 0.9 or d 0.9
if (chunks[0] == "Tr" or chunks[0] == "d") and len(chunks) == 2:
materials[identifier]["transparent"] = True
if TRANSPARENCY == "invert":
materials[identifier]["transparency"] = 1.0 - float(chunks[1])
else:
materials[identifier]["transparency"] = float(chunks[1])
# Optical density
# Ni 1.0
if chunks[0] == "Ni" and len(chunks) == 2:
materials[identifier]["opticalDensity"] = float(chunks[1])
# Illumination
# illum 2
#
# 0. Color on and Ambient off
# 1. Color on and Ambient on
# 2. Highlight on
# 3. Reflection on and Ray trace on
# 4. Transparency: Glass on, Reflection: Ray trace on
# 5. Reflection: Fresnel on and Ray trace on
# 6. Transparency: Refraction on, Reflection: Fresnel off and Ray trace on
# 7. Transparency: Refraction on, Reflection: Fresnel on and Ray trace on
# 8. Reflection on and Ray trace off
# 9. Transparency: Glass on, Reflection: Ray trace off
# 10. Casts shadows onto invisible surfaces
if chunks[0] == "illum" and len(chunks) == 2:
materials[identifier]["illumination"] = int(chunks[1])
return materials
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v':v, 't':t, 'n':n }
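# Illustrative examples (not part of the converter) covering the formats
# listed in the docstring; missing indices default to 0:
#   parse_vertex("5")      # -> {'v': 5, 't': 0, 'n': 0}
#   parse_vertex("5/2")    # -> {'v': 5, 't': 2, 'n': 0}
#   parse_vertex("5/2/3")  # -> {'v': 5, 't': 2, 'n': 3}
#   parse_vertex("5//3")   # -> {'v': 5, 't': 0, 'n': 3}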
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
material = ""
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (e.g. "usemtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl":
if len(chunks) > 1:
material = chunks[1]
else:
material = ""
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
# Precompute vert / normal / uv lists
# for negative index lookup
vertlen = len(vertices) + 1
normlen = len(normals) + 1
uvlen = len(uvs) + 1
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
if vertex['v'] < 0:
vertex['v'] += vertlen
vertex_index.append(vertex['v'])
if vertex['t']:
if vertex['t'] < 0:
vertex['t'] += uvlen
uv_index.append(vertex['t'])
if vertex['n']:
if vertex['n'] < 0:
vertex['n'] += normlen
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
# #####################################################
# Generator - faces
# #####################################################
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
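# Illustrative example (not part of the converter): bits are turned on with OR
# and turned off with AND of the inverted mask.
#   setBit(0, 1, True)   # -> 2 (bit 1 set)
#   setBit(3, 0, False)  # -> 2 (bit 0 cleared)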
def generate_face(f, fc):
isTriangle = ( len(f['vertex']) == 3 )
if isTriangle:
nVertices = 3
else:
nVertices = 4
hasMaterial = True # for the moment OBJs without materials get default material
hasFaceUvs = False # not supported in OBJ
hasFaceVertexUvs = ( len(f['uv']) >= nVertices )
hasFaceNormals = False # don't export any face normals (as they are computed in engine)
hasFaceVertexNormals = ( len(f["normal"]) >= nVertices and SHADING == "smooth" )
hasFaceColors = BAKE_COLORS
hasFaceVertexColors = False # not supported in OBJ
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face normal index
# face vertex normals indices
# face color index
# face vertex colors indices
faceData.append(faceType)
# must clamp in case of polygons bigger than quads
for i in xrange(nVertices):
index = f['vertex'][i] - 1
faceData.append(index)
faceData.append( f['material'] )
if hasFaceVertexUvs:
for i in xrange(nVertices):
index = f['uv'][i] - 1
faceData.append(index)
if hasFaceVertexNormals:
for i in xrange(nVertices):
index = f['normal'][i] - 1
faceData.append(index)
if hasFaceColors:
index = fc['material']
faceData.append(index)
return ",".join( map(str, faceData) )
# #####################################################
# Generator - chunks
# #####################################################
def hexcolor(c):
return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
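# Illustrative example (not part of the converter): channels in [0, 1] are
# packed into a single 0xRRGGBB integer.
#   hexcolor([1.0, 0.5, 0.0])  # -> 16744192 (0xff7f00)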
def generate_vertex(v, option_vertices_truncate, scale):
if not option_vertices_truncate:
return TEMPLATE_VERTEX % (v[0], v[1], v[2])
else:
return TEMPLATE_VERTEX_TRUNCATE % (scale * v[0], scale * v[1], scale * v[2])
def generate_normal(n):
return TEMPLATE_N % (n[0], n[1], n[2])
def generate_uv(uv):
return TEMPLATE_UV % (uv[0], uv[1])
def generate_color_rgb(c):
return TEMPLATE_COLOR % (c[0], c[1], c[2])
def generate_color_decimal(c):
return TEMPLATE_COLOR_DEC % hexcolor(c)
# #####################################################
# Morphs
# #####################################################
def generate_morph_vertex(name, vertices):
vertex_string = ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices)
return TEMPLATE_MORPH_VERTICES % (name, vertex_string)
def generate_morph_color(name, colors):
color_string = ",".join(generate_color_rgb(c) for c in colors)
return TEMPLATE_MORPH_COLORS % (name, color_string)
def extract_material_colors(materials, mtlfilename, basename):
"""Extract diffuse colors from MTL materials
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
mtlColorArraySrt = []
for m in mtl:
if m in materials:
index = materials[m]
color = mtl[m].get("colorDiffuse", [1,0,0])
mtlColorArraySrt.append([index, color])
mtlColorArraySrt.sort()
mtlColorArray = [x[1] for x in mtlColorArraySrt]
return mtlColorArray
def extract_face_colors(faces, material_colors):
"""Extract colors from materials and assign them to faces
"""
faceColors = []
for face in faces:
material_index = face['material']
faceColors.append(material_colors[material_index])
return faceColors
def generate_morph_targets(morphfiles, n_vertices, infile):
skipOriginalMorph = False
norminfile = os.path.normpath(infile)
morphVertexData = []
for mfilepattern in morphfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
indices = range(0, len(matches), FRAMESTEP)
for i in indices:
path = matches[i]
normpath = os.path.normpath(path)
if normpath != norminfile or not skipOriginalMorph:
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
else:
if ALIGN == "center":
center(morphVertices)
elif ALIGN == "centerxz":
centerxz(morphVertices)
elif ALIGN == "bottom":
bottom(morphVertices)
elif ALIGN == "top":
top(morphVertices)
morphVertexData.append((get_name(name), morphVertices))
print "adding [%s] with %d vertices" % (name, n_morph_vertices)
morphTargets = ""
if len(morphVertexData):
morphTargets = "\n%s\n\t" % ",\n".join(generate_morph_vertex(name, vertices) for name, vertices in morphVertexData)
return morphTargets
def generate_morph_colors(colorfiles, n_vertices, n_faces):
morphColorData = []
colorFaces = []
materialColors = []
for mfilepattern in colorfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
for path in matches:
normpath = os.path.normpath(path)
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
n_morph_faces = len(morphFaces)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph color map [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
elif n_faces != n_morph_faces:
print "WARNING: skipping morph color map [%s] with different number of faces [%d] than the original model [%d]" % (name, n_morph_faces, n_faces)
else:
morphMaterialColors = extract_material_colors(morphMaterials, morphMtllib, normpath)
morphFaceColors = extract_face_colors(morphFaces, morphMaterialColors)
morphColorData.append((get_name(name), morphFaceColors))
# take first color map for baking into face colors
if len(colorFaces) == 0:
colorFaces = morphFaces
materialColors = morphMaterialColors
print "adding [%s] with %d face colors" % (name, len(morphFaceColors))
morphColors = ""
if len(morphColorData):
morphColors = "\n%s\n\t" % ",\n".join(generate_morph_color(name, colors) for name, colors in morphColorData)
return morphColors, colorFaces, materialColors
# #####################################################
# Materials
# #####################################################
def generate_color(i):
"""Generate hex color corresponding to integer.
Colors should have well defined ordering.
First N colors are hardcoded, then colors are random
(must seed random number generator with deterministic value
before getting colors).
"""
if i < len(COLORS):
#return "0x%06x" % COLORS[i]
return COLORS[i]
else:
#return "0x%06x" % int(0xffffff * random.random())
return int(0xffffff * random.random())
def value2string(v):
if type(v)==str and v[0:2] != "0x":
return '"%s"' % v
elif type(v) == bool:
return str(v).lower()
return str(v)
def generate_materials(mtl, materials):
"""Generate JS array of materials objects
JS material objects are basically prettified one-to-one
mappings of MTL properties in JSON format.
"""
mtl_array = []
for m in mtl:
if m in materials:
index = materials[m]
# add debug information
# materials should be sorted according to how
# they appeared in OBJ file (for the first time)
# this index is identifier used in face definitions
mtl[m]['DbgName'] = m
mtl[m]['DbgIndex'] = index
mtl[m]['DbgColor'] = generate_color(index)
if BAKE_COLORS:
mtl[m]['vertexColors'] = "face"
mtl_raw = ",\n".join(['\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
mtl_string = "\t{\n%s\n\t}" % mtl_raw
mtl_array.append([index, mtl_string])
return ",\n\n".join([m for i,m in sorted(mtl_array)])
def generate_mtl(materials):
"""Generate dummy materials (if there is no MTL file).
"""
mtl = {}
for m in materials:
index = materials[m]
mtl[m] = {
'DbgName': m,
'DbgIndex': index,
'DbgColor': generate_color(index)
}
return mtl
def generate_materials_string(materials, mtlfilename, basename):
"""Generate final materials string.
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
return generate_materials(mtl, materials)
def create_materials(materials, mtlfilename, basename):
"""Parse MTL file and create mapping between its materials and OBJ materials.
Possible edge cases are handled here (missing materials, missing MTL file).
"""
random.seed(42) # to get well defined color order for debug colors
# default materials with debug colors for when
# there is no specified MTL / MTL loading failed,
# or if there were no materials / null materials
mtl = generate_mtl(materials)
if mtlfilename:
# create full pathname for MTL (included from OBJ)
path = os.path.dirname(basename)
fname = os.path.join(path, mtlfilename)
if file_exists(fname):
# override default materials with real ones from MTL
# (where they exist, otherwise keep defaults)
mtl.update(parse_mtl(fname))
else:
print "Couldn't find [%s]" % fname
return mtl
# #####################################################
# Faces
# #####################################################
def is_triangle_flat(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_triangle_flat_uv(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==3
def is_triangle_smooth(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_triangle_smooth_uv(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and len(f['uv'])==3
def is_quad_flat(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_quad_flat_uv(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==4
def is_quad_smooth(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_quad_smooth_uv(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and len(f['uv'])==4
def sort_faces(faces):
data = {
'triangles_flat': [],
'triangles_flat_uv': [],
'triangles_smooth': [],
'triangles_smooth_uv': [],
'quads_flat': [],
'quads_flat_uv': [],
'quads_smooth': [],
'quads_smooth_uv': []
}
for f in faces:
if is_triangle_flat(f):
data['triangles_flat'].append(f)
elif is_triangle_flat_uv(f):
data['triangles_flat_uv'].append(f)
elif is_triangle_smooth(f):
data['triangles_smooth'].append(f)
elif is_triangle_smooth_uv(f):
data['triangles_smooth_uv'].append(f)
elif is_quad_flat(f):
data['quads_flat'].append(f)
elif is_quad_flat_uv(f):
data['quads_flat_uv'].append(f)
elif is_quad_smooth(f):
data['quads_smooth'].append(f)
elif is_quad_smooth_uv(f):
data['quads_smooth_uv'].append(f)
return data
# #####################################################
# API - ASCII converter
# #####################################################
def convert_ascii(infile, morphfiles, colorfiles, outfile):
"""Convert infile.obj to outfile.js
Here is where everything happens. If you need to automate conversions,
just import this file as a Python module and call this method.
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
# parse OBJ / MTL files
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
n_vertices = len(vertices)
n_faces = len(faces)
# align model
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
# generate normals string
nnormal = 0
normals_string = ""
if SHADING == "smooth":
normals_string = ",".join(generate_normal(n) for n in normals)
nnormal = len(normals)
# extract morph vertices
morphTargets = generate_morph_targets(morphfiles, n_vertices, infile)
# extract morph colors
morphColors, colorFaces, materialColors = generate_morph_colors(colorfiles, n_vertices, n_faces)
# generate colors string
ncolor = 0
colors_string = ""
if len(colorFaces) < len(faces):
colorFaces = faces
materialColors = extract_material_colors(materials, mtllib, infile)
if BAKE_COLORS:
colors_string = ",".join(generate_color_decimal(c) for c in materialColors)
ncolor = len(materialColors)
# generate ascii model string
text = TEMPLATE_FILE_ASCII % {
"name" : get_name(outfile),
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nuv" : len(uvs),
"nnormal" : nnormal,
"ncolor" : ncolor,
"nmaterial" : len(materials),
"materials" : generate_materials_string(materials, mtllib, infile),
"normals" : normals_string,
"colors" : colors_string,
"uvs" : ",".join(generate_uv(uv) for uv in uvs),
"vertices" : ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices),
"morphTargets" : morphTargets,
"morphColors" : morphColors,
"faces" : ",".join(generate_face(f, fc) for f, fc in zip(faces, colorFaces)),
"scale" : SCALE
}
out = open(outfile, "w")
out.write(text)
out.close()
print "%d vertices, %d faces, %d materials" % (len(vertices), len(faces), len(materials))
# #############################################################################
# API - Binary converter
# #############################################################################
def dump_materials_to_buffer(faces, buffer):
for f in faces:
data = struct.pack('<H',
f['material'])
buffer.append(data)
def dump_vertices3_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<III',
vi[0]-1, vi[1]-1, vi[2]-1)
buffer.append(data)
def dump_vertices4_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<IIII',
vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1)
buffer.append(data)
def dump_normals3_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<III',
ni[0]-1, ni[1]-1, ni[2]-1)
buffer.append(data)
def dump_normals4_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<IIII',
ni[0]-1, ni[1]-1, ni[2]-1, ni[3]-1)
buffer.append(data)
def dump_uvs3_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<III',
ui[0]-1, ui[1]-1, ui[2]-1)
buffer.append(data)
def dump_uvs4_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<IIII',
ui[0]-1, ui[1]-1, ui[2]-1, ui[3]-1)
buffer.append(data)
def add_padding(buffer, n):
if n % 4:
for i in range(4 - n % 4):
data = struct.pack('<B', 0)
buffer.append(data)
def convert_binary(infile, outfile):
"""Convert infile.obj to outfile.js + outfile.bin
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
binfile = get_name(outfile) + ".bin"
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
sfaces = sort_faces(faces)
if SHADING == "smooth":
nnormals = len(normals)
else:
nnormals = 0
# ###################
# generate JS file
# ###################
text = TEMPLATE_FILE_BIN % {
"name" : get_name(outfile),
"materials" : generate_materials_string(materials, mtllib, infile),
"buffers" : binfile,
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nmaterial" : len(materials),
"nnormal" : nnormals,
"nuv" : len(uvs)
}
out = open(outfile, "w")
out.write(text)
out.close()
# ###################
# generate BIN file
# ###################
buffer = []
# header
# ------
header_bytes = struct.calcsize('<12s')
header_bytes += struct.calcsize('<BBBBBBBB')
header_bytes += struct.calcsize('<IIIIIIIIIII')
# signature
signature = struct.pack('<12s', 'Three.js 003')
# metadata (all data is little-endian)
vertex_coordinate_bytes = 4
normal_coordinate_bytes = 1
uv_coordinate_bytes = 4
vertex_index_bytes = 4
normal_index_bytes = 4
uv_index_bytes = 4
material_index_bytes = 2
# header_bytes unsigned char 1
# vertex_coordinate_bytes unsigned char 1
# normal_coordinate_bytes unsigned char 1
# uv_coordinate_bytes unsigned char 1
# vertex_index_bytes unsigned char 1
# normal_index_bytes unsigned char 1
# uv_index_bytes unsigned char 1
# material_index_bytes unsigned char 1
bdata = struct.pack('<BBBBBBBB', header_bytes,
vertex_coordinate_bytes,
normal_coordinate_bytes,
uv_coordinate_bytes,
vertex_index_bytes,
normal_index_bytes,
uv_index_bytes,
material_index_bytes)
ntri_flat = len(sfaces['triangles_flat'])
ntri_smooth = len(sfaces['triangles_smooth'])
ntri_flat_uv = len(sfaces['triangles_flat_uv'])
ntri_smooth_uv = len(sfaces['triangles_smooth_uv'])
nquad_flat = len(sfaces['quads_flat'])
nquad_smooth = len(sfaces['quads_smooth'])
nquad_flat_uv = len(sfaces['quads_flat_uv'])
nquad_smooth_uv = len(sfaces['quads_smooth_uv'])
# nvertices unsigned int 4
# nnormals unsigned int 4
# nuvs unsigned int 4
# ntri_flat unsigned int 4
# ntri_smooth unsigned int 4
# ntri_flat_uv unsigned int 4
# ntri_smooth_uv unsigned int 4
# nquad_flat unsigned int 4
# nquad_smooth unsigned int 4
# nquad_flat_uv unsigned int 4
# nquad_smooth_uv unsigned int 4
ndata = struct.pack('<IIIIIIIIIII', len(vertices),
nnormals,
len(uvs),
ntri_flat,
ntri_smooth,
ntri_flat_uv,
ntri_smooth_uv,
nquad_flat,
nquad_smooth,
nquad_flat_uv,
nquad_smooth_uv)
buffer.append(signature)
buffer.append(bdata)
buffer.append(ndata)
# 1. vertices
# ------------
# x float 4
# y float 4
# z float 4
for v in vertices:
data = struct.pack('<fff', v[0], v[1], v[2])
buffer.append(data)
# 2. normals
# ---------------
# x signed char 1
# y signed char 1
# z signed char 1
if SHADING == "smooth":
for n in normals:
normalize(n)
data = struct.pack('<bbb', math.floor(n[0]*127+0.5),
math.floor(n[1]*127+0.5),
math.floor(n[2]*127+0.5))
buffer.append(data)
add_padding(buffer, nnormals * 3)
# 3. uvs
# -----------
# u float 4
# v float 4
for uv in uvs:
data = struct.pack('<ff', uv[0], uv[1])
buffer.append(data)
# padding
#data = struct.pack('<BB', 0, 0)
#buffer.append(data)
# 4. flat triangles (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# ------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat'], buffer)
add_padding(buffer, ntri_flat * 2)
# 5. smooth triangles (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# -------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# -------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth'], buffer)
add_padding(buffer, ntri_smooth * 2)
# 6. flat triangles uv (vertices + materials + uvs)
# --------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat_uv'], buffer)
add_padding(buffer, ntri_flat_uv * 2)
# 7. smooth triangles uv (vertices + materials + normals + uvs)
# ----------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth_uv'], buffer)
add_padding(buffer, ntri_smooth_uv * 2)
# 8. flat quads (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat'], buffer)
dump_materials_to_buffer(sfaces['quads_flat'], buffer)
add_padding(buffer, nquad_flat * 2)
# 9. smooth quads (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth'], buffer)
add_padding(buffer, nquad_smooth * 2)
# 10. flat quads uv (vertices + materials + uvs)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_flat_uv'], buffer)
add_padding(buffer, nquad_flat_uv * 2)
# 11. smooth quads uv
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth_uv'], buffer)
add_padding(buffer, nquad_smooth_uv * 2)
path = os.path.dirname(outfile)
fname = os.path.join(path, binfile)
out = open(fname, "wb")
out.write("".join(buffer))
out.close()
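# Reading the header back (a sketch, not part of the converter): the binary
# file starts with the 64-byte header written above ("model.bin" is a
# placeholder path; struct is the module already imported at the top).
#   with open("model.bin", "rb") as f:
#       header = f.read(64)
#   signature = struct.unpack("<12s", header[:12])[0]         # "Three.js 003"
#   sizes     = struct.unpack("<BBBBBBBB", header[12:20])     # byte sizes of header / coords / indices
#   counts    = struct.unpack("<IIIIIIIIIII", header[20:64])  # nvertices, nnormals, nuvs, face counts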
# #############################################################################
# Helpers
# #############################################################################
def usage():
print "Usage: %s -i filename.obj -o filename.js [-m morphfiles*.obj] [-c morphcolors*.obj] [-a center|top|bottom] [-s flat|smooth] [-t binary|ascii] [-d invert|normal]" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hbi:m:c:b:o:a:s:t:d:x:f:", ["help", "bakecolors", "input=", "morphs=", "colors=", "output=", "align=", "shading=", "type=", "dissolve=", "truncatescale=", "framestep="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
morphfiles = ""
colorfiles = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-m", "--morphs"):
morphfiles = a
elif o in ("-c", "--colors"):
colorfiles = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-a", "--align"):
if a in ("top", "bottom", "center", "centerxz", "none"):
ALIGN = a
elif o in ("-s", "--shading"):
if a in ("flat", "smooth"):
SHADING = a
elif o in ("-t", "--type"):
if a in ("binary", "ascii"):
TYPE = a
elif o in ("-d", "--dissolve"):
if a in ("normal", "invert"):
TRANSPARENCY = a
elif o in ("-b", "--bakecolors"):
BAKE_COLORS = True
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
elif o in ("-f", "--framestep"):
FRAMESTEP = int(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print "Converting [%s] into [%s] ..." % (infile, outfile)
if morphfiles:
print "Morphs [%s]" % morphfiles
if colorfiles:
print "Colors [%s]" % colorfiles
if TYPE == "ascii":
convert_ascii(infile, morphfiles, colorfiles, outfile)
elif TYPE == "binary":
convert_binary(infile, outfile)
| three.js-master | utils/converters/obj/convert_obj_three.py |
"""Split single OBJ model into mutliple OBJ files by materials
-------------------------------------
How to use
-------------------------------------
python split_obj.py -i infile.obj -o outfile
Will generate:
outfile_000.obj
outfile_001.obj
...
outfile_XXX.obj
-------------------------------------
Parser based on format description
-------------------------------------
http://en.wikipedia.org/wiki/Obj
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
TRUNCATE = False
SCALE = 1.0
# #####################################################
# Templates
# #####################################################
TEMPLATE_OBJ = u"""\
################################
# OBJ generated by split_obj.py
################################
# Faces: %(nfaces)d
# Vertices: %(nvertices)d
# Normals: %(nnormals)d
# UVs: %(nuvs)d
################################
# vertices
%(vertices)s
# normals
%(normals)s
# uvs
%(uvs)s
# faces
%(faces)s
"""
TEMPLATE_VERTEX = "v %f %f %f"
TEMPLATE_VERTEX_TRUNCATE = "v %d %d %d"
TEMPLATE_NORMAL = "vn %.5g %.5g %.5g"
TEMPLATE_UV = "vt %.5g %.5g"
TEMPLATE_FACE3_V = "f %d %d %d"
TEMPLATE_FACE4_V = "f %d %d %d %d"
TEMPLATE_FACE3_VT = "f %d/%d %d/%d %d/%d"
TEMPLATE_FACE4_VT = "f %d/%d %d/%d %d/%d %d/%d"
TEMPLATE_FACE3_VN = "f %d//%d %d//%d %d//%d"
TEMPLATE_FACE4_VN = "f %d//%d %d//%d %d//%d %d//%d"
TEMPLATE_FACE3_VTN = "f %d/%d/%d %d/%d/%d %d/%d/%d"
TEMPLATE_FACE4_VTN = "f %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d"
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v': v, 't': t, 'n': n }
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
for line in fileinput.input(fname):
chunks = line.split()
if len(chunks) > 0:
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
vertex_index.append(vertex['v'])
if vertex['t']:
uv_index.append(vertex['t'])
if vertex['n']:
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl" and len(chunks) == 2:
material = chunks[1]
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
# #############################################################################
# API - Breaker
# #############################################################################
def break_obj(infile, outfile):
"""Break infile.obj to outfile.obj
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
# sort faces by materials
chunks = {}
for face in faces:
material = face["material"]
if not material in chunks:
chunks[material] = {"faces": [], "vertices": set(), "normals": set(), "uvs": set()}
chunks[material]["faces"].append(face)
# extract unique vertex / normal / uv indices used per chunk
for material in chunks:
chunk = chunks[material]
for face in chunk["faces"]:
for i in face["vertex"]:
chunk["vertices"].add(i)
for i in face["normal"]:
chunk["normals"].add(i)
for i in face["uv"]:
chunk["uvs"].add(i)
# generate new OBJs
for mi, material in enumerate(chunks):
chunk = chunks[material]
# generate separate vertex / normal / uv index lists for each chunk
# (including mapping from original to new indices)
# get well defined order
new_vertices = list(chunk["vertices"])
new_normals = list(chunk["normals"])
new_uvs = list(chunk["uvs"])
# map original => new indices
vmap = {}
for i, v in enumerate(new_vertices):
vmap[v] = i + 1
nmap = {}
for i, n in enumerate(new_normals):
nmap[n] = i + 1
tmap = {}
for i, t in enumerate(new_uvs):
tmap[t] = i + 1
# vertices
pieces = []
for i in new_vertices:
vertex = vertices[i-1]
txt = TEMPLATE_VERTEX % (vertex[0], vertex[1], vertex[2])
pieces.append(txt)
str_vertices = "\n".join(pieces)
# normals
pieces = []
for i in new_normals:
normal = normals[i-1]
txt = TEMPLATE_NORMAL % (normal[0], normal[1], normal[2])
pieces.append(txt)
str_normals = "\n".join(pieces)
# uvs
pieces = []
for i in new_uvs:
uv = uvs[i-1]
txt = TEMPLATE_UV % (uv[0], uv[1])
pieces.append(txt)
str_uvs = "\n".join(pieces)
# faces
pieces = []
for face in chunk["faces"]:
txt = ""
fv = face["vertex"]
fn = face["normal"]
ft = face["uv"]
if len(fv) == 3:
va = vmap[fv[0]]
vb = vmap[fv[1]]
vc = vmap[fv[2]]
if len(fn) == 3 and len(ft) == 3:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
txt = TEMPLATE_FACE3_VTN % (va, ta, na, vb, tb, nb, vc, tc, nc)
elif len(fn) == 3:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
txt = TEMPLATE_FACE3_VN % (va, na, vb, nb, vc, nc)
elif len(ft) == 3:
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
txt = TEMPLATE_FACE3_VT % (va, ta, vb, tb, vc, tc)
else:
txt = TEMPLATE_FACE3_V % (va, vb, vc)
elif len(fv) == 4:
va = vmap[fv[0]]
vb = vmap[fv[1]]
vc = vmap[fv[2]]
vd = vmap[fv[3]]
if len(fn) == 4 and len(ft) == 4:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
nd = nmap[fn[3]]
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
td = tmap[ft[3]]
txt = TEMPLATE_FACE4_VTN % (va, ta, na, vb, tb, nb, vc, tc, nc, vd, td, nd)
elif len(fn) == 4:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
nd = nmap[fn[3]]
txt = TEMPLATE_FACE4_VN % (va, na, vb, nb, vc, nc, vd, nd)
elif len(ft) == 4:
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
td = tmap[ft[3]]
txt = TEMPLATE_FACE4_VT % (va, ta, vb, tb, vc, tc, vd, td)
else:
txt = TEMPLATE_FACE4_V % (va, vb, vc, vd)
pieces.append(txt)
str_faces = "\n".join(pieces)
# generate OBJ string
content = TEMPLATE_OBJ % {
"nfaces" : len(chunk["faces"]),
"nvertices" : len(new_vertices),
"nnormals" : len(new_normals),
"nuvs" : len(new_uvs),
"vertices" : str_vertices,
"normals" : str_normals,
"uvs" : str_uvs,
"faces" : str_faces
}
# write OBJ file
outname = "%s_%03d.obj" % (outfile, mi)
f = open(outname, "w")
f.write(content)
f.close()
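# Usage sketch (not executed here): splitting a model programmatically.
# "scene.obj" and the "scene_part" prefix are placeholders; this writes
# scene_part_000.obj, scene_part_001.obj, ... one OBJ per material.
#   break_obj("scene.obj", "scene_part")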
# #############################################################################
# Helpers
# #############################################################################
def usage():
print "Usage: %s -i filename.obj -o prefix" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:o:x:", ["help", "input=", "output=", "truncatescale="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print "Splitting [%s] into [%s_XXX.obj] ..." % (infile, outfile)
break_obj(infile, outfile)
| three.js-master | utils/converters/obj/split_obj.py |
#!/usr/bin/env python
__doc__ = '''
Convert a json file to msgpack.
If fed only an input file, the converter will write out a .pack file
of the same base name in the same directory
$ json2msgpack.py -i foo.json
foo.json > foo.pack
Specify an output file path
$ json2msgpack.py -i foo.json -o /bar/tmp/bar.pack
foo.json > /bar/tmp/bar.pack
Dependencies:
https://github.com/msgpack/msgpack-python
'''
import os
import sys
import json
import argparse
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import msgpack
EXT = '.pack'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--infile', required=True,
help='Input json file to convert to msgpack')
parser.add_argument('-o', '--outfile',
help=('Optional output. If not specified the .pack file '\
'will be written to the same directory as the input file.'))
args = parser.parse_args()
convert(args.infile, args.outfile)
def convert(infile, outfile):
if not outfile:
ext = infile.split('.')[-1]
outfile = '%s%s' % (infile[:-len(ext)-1], EXT)
print('%s > %s' % (infile, outfile))
print('reading in JSON')
with open(infile) as op:
data = json.load(op)
print('writing to msgpack')
with open(outfile, 'wb') as op:
msgpack.dump(data, op)
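# Usage sketch (not executed here): calling convert() directly instead of via
# the CLI. "scene.json" is a placeholder path; passing None for outfile writes
# "scene.pack" next to the input, as implemented above.
#   convert("scene.json", None)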
if __name__ == '__main__':
main()
| three.js-master | utils/converters/msgpack/json2msgpack.py |
version = (0, 4, 2)
| three.js-master | utils/converters/msgpack/msgpack/_version.py |
# coding: utf-8
from msgpack._version import version
from msgpack.exceptions import *
from collections import namedtuple
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
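# Illustrative example (not part of the library): an application-defined
# extension type with type code 42.
#   t = ExtType(42, b"raw payload")
#   t.code  # -> 42
#   t.data  # -> b"raw payload"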
import os
if os.environ.get('MSGPACK_PUREPYTHON'):
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
else:
try:
from msgpack._packer import Packer
from msgpack._unpacker import unpack, unpackb, Unpacker
except ImportError:
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# aliases for compatibility with simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
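# Round-trip sketch (not executed here; assumes this package is importable as
# "msgpack", e.g. from a script one directory up such as json2msgpack.py):
#   import msgpack
#   payload = msgpack.packb([1, 2, 3])
#   msgpack.unpackb(payload)  # -> [1, 2, 3]
# Note: without an explicit encoding, unpackb returns strings as bytes;
# pass encoding='utf-8' to unpackb to get unicode strings back.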
| three.js-master | utils/converters/msgpack/msgpack/__init__.py |
class UnpackException(Exception):
pass
class BufferFull(UnpackException):
pass
class OutOfData(UnpackException):
pass
class UnpackValueError(UnpackException, ValueError):
pass
class ExtraData(ValueError):
def __init__(self, unpacked, extra):
self.unpacked = unpacked
self.extra = extra
def __str__(self):
return "unpack(b) received extra data."
class PackException(Exception):
pass
class PackValueError(PackException, ValueError):
pass
| three.js-master | utils/converters/msgpack/msgpack/exceptions.py |
"""Fallback pure Python implementation of msgpack"""
import sys
import array
import struct
if sys.version_info[0] == 3:
PY3 = True
int_types = int
Unicode = str
xrange = range
def dict_iteritems(d):
return d.items()
else:
PY3 = False
int_types = (int, long)
Unicode = unicode
def dict_iteritems(d):
return d.iteritems()
if hasattr(sys, 'pypy_version_info'):
# cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own
# StringBuilder is fastest.
from __pypy__ import newlist_hint
from __pypy__.builders import StringBuilder
USING_STRINGBUILDER = True
class StringIO(object):
def __init__(self, s=b''):
if s:
self.builder = StringBuilder(len(s))
self.builder.append(s)
else:
self.builder = StringBuilder()
def write(self, s):
self.builder.append(s)
def getvalue(self):
return self.builder.build()
else:
USING_STRINGBUILDER = False
from io import BytesIO as StringIO
newlist_hint = lambda size: []
from msgpack.exceptions import (
BufferFull,
OutOfData,
UnpackValueError,
PackValueError,
ExtraData)
from msgpack import ExtType
EX_SKIP = 0
EX_CONSTRUCT = 1
EX_READ_ARRAY_HEADER = 2
EX_READ_MAP_HEADER = 3
TYPE_IMMEDIATE = 0
TYPE_ARRAY = 1
TYPE_MAP = 2
TYPE_RAW = 3
TYPE_BIN = 4
TYPE_EXT = 5
DEFAULT_RECURSE_LIMIT = 511
def unpack(stream, **kwargs):
"""
Unpack an object from `stream`.
Raises `ExtraData` when `stream` contains extra bytes.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(stream, **kwargs)
ret = unpacker._fb_unpack()
if unpacker._fb_got_extradata():
raise ExtraData(ret, unpacker._fb_get_extradata())
return ret
def unpackb(packed, **kwargs):
"""
Unpack an object from `packed`.
Raises `ExtraData` when `packed` contains extra bytes.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(None, **kwargs)
unpacker.feed(packed)
try:
ret = unpacker._fb_unpack()
except OutOfData:
raise UnpackValueError("Data is not enough.")
if unpacker._fb_got_extradata():
raise ExtraData(ret, unpacker._fb_get_extradata())
return ret
class Unpacker(object):
"""
Streaming unpacker.
`file_like` is a file-like object having a `.read(n)` method.
When `Unpacker` is initialized with a `file_like`, `.feed()` is not
usable.
`read_size` is used for `file_like.read(read_size)`.
If `use_list` is True (default), msgpack lists are deserialized to Python
lists. Otherwise they are deserialized to tuples.
`object_hook` is the same as in simplejson. If it is not None, it should
be callable and Unpacker calls it with a dict argument after deserializing
a map.
`object_pairs_hook` is the same as in simplejson. If it is not None, it
should be callable and Unpacker calls it with a list of key-value pairs
after deserializing a map.
`ext_hook` is a callback for ext (user-defined) types. It is called with two
arguments: (code, bytes). default: `msgpack.ExtType`
`encoding` is the encoding used for decoding msgpack bytes. If it is
None (default), msgpack bytes are deserialized to Python bytes.
`unicode_errors` is used for decoding bytes.
`max_buffer_size` limits the buffer size. 0 means INT_MAX (default).
Raises a `BufferFull` exception when it is insufficient.
You should set this parameter when unpacking data from an untrusted source.
example of streaming deserialization from file-like object::
unpacker = Unpacker(file_like)
for o in unpacker:
do_something(o)
example of streaming deserialization from socket::
unpacker = Unpacker()
while 1:
buf = sock.recv(1024*2)
if not buf:
break
unpacker.feed(buf)
for o in unpacker:
do_something(o)
"""
def __init__(self, file_like=None, read_size=0, use_list=True,
object_hook=None, object_pairs_hook=None, list_hook=None,
encoding=None, unicode_errors='strict', max_buffer_size=0,
ext_hook=ExtType):
if file_like is None:
self._fb_feeding = True
else:
if not callable(file_like.read):
raise TypeError("`file_like.read` must be callable")
self.file_like = file_like
self._fb_feeding = False
self._fb_buffers = []
self._fb_buf_o = 0
self._fb_buf_i = 0
self._fb_buf_n = 0
self._max_buffer_size = max_buffer_size or 2**31-1
if read_size > self._max_buffer_size:
raise ValueError("read_size must be smaller than max_buffer_size")
self._read_size = read_size or min(self._max_buffer_size, 2048)
self._encoding = encoding
self._unicode_errors = unicode_errors
self._use_list = use_list
self._list_hook = list_hook
self._object_hook = object_hook
self._object_pairs_hook = object_pairs_hook
self._ext_hook = ext_hook
if list_hook is not None and not callable(list_hook):
raise TypeError('`list_hook` is not callable')
if object_hook is not None and not callable(object_hook):
raise TypeError('`object_hook` is not callable')
if object_pairs_hook is not None and not callable(object_pairs_hook):
raise TypeError('`object_pairs_hook` is not callable')
if object_hook is not None and object_pairs_hook is not None:
raise TypeError("object_pairs_hook and object_hook are mutually "
"exclusive")
if not callable(ext_hook):
raise TypeError("`ext_hook` is not callable")
def feed(self, next_bytes):
if isinstance(next_bytes, array.array):
next_bytes = next_bytes.tostring()
elif isinstance(next_bytes, bytearray):
next_bytes = bytes(next_bytes)
assert self._fb_feeding
if self._fb_buf_n + len(next_bytes) > self._max_buffer_size:
raise BufferFull
self._fb_buf_n += len(next_bytes)
self._fb_buffers.append(next_bytes)
def _fb_consume(self):
self._fb_buffers = self._fb_buffers[self._fb_buf_i:]
if self._fb_buffers:
self._fb_buffers[0] = self._fb_buffers[0][self._fb_buf_o:]
self._fb_buf_o = 0
self._fb_buf_i = 0
self._fb_buf_n = sum(map(len, self._fb_buffers))
def _fb_got_extradata(self):
if self._fb_buf_i != len(self._fb_buffers):
return True
if self._fb_feeding:
return False
if not self.file_like:
return False
if self.file_like.read(1):
return True
return False
def __iter__(self):
return self
def read_bytes(self, n):
return self._fb_read(n)
def _fb_rollback(self):
self._fb_buf_i = 0
self._fb_buf_o = 0
def _fb_get_extradata(self):
bufs = self._fb_buffers[self._fb_buf_i:]
if bufs:
bufs[0] = bufs[0][self._fb_buf_o:]
return b''.join(bufs)
def _fb_read(self, n, write_bytes=None):
buffs = self._fb_buffers
if (write_bytes is None and self._fb_buf_i < len(buffs) and
self._fb_buf_o + n < len(buffs[self._fb_buf_i])):
self._fb_buf_o += n
return buffs[self._fb_buf_i][self._fb_buf_o - n:self._fb_buf_o]
ret = b''
while len(ret) != n:
if self._fb_buf_i == len(buffs):
if self._fb_feeding:
break
tmp = self.file_like.read(self._read_size)
if not tmp:
break
buffs.append(tmp)
continue
sliced = n - len(ret)
ret += buffs[self._fb_buf_i][self._fb_buf_o:self._fb_buf_o + sliced]
self._fb_buf_o += sliced
if self._fb_buf_o >= len(buffs[self._fb_buf_i]):
self._fb_buf_o = 0
self._fb_buf_i += 1
if len(ret) != n:
self._fb_rollback()
raise OutOfData
if write_bytes is not None:
write_bytes(ret)
return ret
def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None):
typ = TYPE_IMMEDIATE
n = 0
obj = None
c = self._fb_read(1, write_bytes)
b = ord(c)
if b & 0b10000000 == 0:
obj = b
elif b & 0b11100000 == 0b11100000:
obj = struct.unpack("b", c)[0]
elif b & 0b11100000 == 0b10100000:
n = b & 0b00011111
obj = self._fb_read(n, write_bytes)
typ = TYPE_RAW
elif b & 0b11110000 == 0b10010000:
n = b & 0b00001111
typ = TYPE_ARRAY
elif b & 0b11110000 == 0b10000000:
n = b & 0b00001111
typ = TYPE_MAP
elif b == 0xc0:
obj = None
elif b == 0xc2:
obj = False
elif b == 0xc3:
obj = True
elif b == 0xc4:
typ = TYPE_BIN
n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc5:
typ = TYPE_BIN
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc6:
typ = TYPE_BIN
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc7: # ext 8
typ = TYPE_EXT
L, n = struct.unpack('Bb', self._fb_read(2, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xc8: # ext 16
typ = TYPE_EXT
L, n = struct.unpack('>Hb', self._fb_read(3, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xc9: # ext 32
typ = TYPE_EXT
L, n = struct.unpack('>Ib', self._fb_read(5, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xca:
obj = struct.unpack(">f", self._fb_read(4, write_bytes))[0]
elif b == 0xcb:
obj = struct.unpack(">d", self._fb_read(8, write_bytes))[0]
elif b == 0xcc:
obj = struct.unpack("B", self._fb_read(1, write_bytes))[0]
elif b == 0xcd:
obj = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
elif b == 0xce:
obj = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
elif b == 0xcf:
obj = struct.unpack(">Q", self._fb_read(8, write_bytes))[0]
elif b == 0xd0:
obj = struct.unpack("b", self._fb_read(1, write_bytes))[0]
elif b == 0xd1:
obj = struct.unpack(">h", self._fb_read(2, write_bytes))[0]
elif b == 0xd2:
obj = struct.unpack(">i", self._fb_read(4, write_bytes))[0]
elif b == 0xd3:
obj = struct.unpack(">q", self._fb_read(8, write_bytes))[0]
elif b == 0xd4: # fixext 1
typ = TYPE_EXT
n, obj = struct.unpack('b1s', self._fb_read(2, write_bytes))
elif b == 0xd5: # fixext 2
typ = TYPE_EXT
n, obj = struct.unpack('b2s', self._fb_read(3, write_bytes))
elif b == 0xd6: # fixext 4
typ = TYPE_EXT
n, obj = struct.unpack('b4s', self._fb_read(5, write_bytes))
elif b == 0xd7: # fixext 8
typ = TYPE_EXT
n, obj = struct.unpack('b8s', self._fb_read(9, write_bytes))
elif b == 0xd8: # fixext 16
typ = TYPE_EXT
n, obj = struct.unpack('b16s', self._fb_read(17, write_bytes))
elif b == 0xd9:
typ = TYPE_RAW
n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xda:
typ = TYPE_RAW
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xdb:
typ = TYPE_RAW
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xdc:
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
typ = TYPE_ARRAY
elif b == 0xdd:
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
typ = TYPE_ARRAY
elif b == 0xde:
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
typ = TYPE_MAP
elif b == 0xdf:
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
typ = TYPE_MAP
else:
raise UnpackValueError("Unknown header: 0x%x" % b)
return typ, n, obj
def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None):
typ, n, obj = self._read_header(execute, write_bytes)
if execute == EX_READ_ARRAY_HEADER:
if typ != TYPE_ARRAY:
raise UnpackValueError("Expected array")
return n
if execute == EX_READ_MAP_HEADER:
if typ != TYPE_MAP:
raise UnpackValueError("Expected map")
return n
# TODO should we eliminate the recursion?
if typ == TYPE_ARRAY:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call `list_hook`
self._fb_unpack(EX_SKIP, write_bytes)
return
ret = newlist_hint(n)
for i in xrange(n):
ret.append(self._fb_unpack(EX_CONSTRUCT, write_bytes))
if self._list_hook is not None:
ret = self._list_hook(ret)
# TODO is the interaction between `list_hook` and `use_list` ok?
return ret if self._use_list else tuple(ret)
if typ == TYPE_MAP:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call hooks
self._fb_unpack(EX_SKIP, write_bytes)
self._fb_unpack(EX_SKIP, write_bytes)
return
if self._object_pairs_hook is not None:
ret = self._object_pairs_hook(
(self._fb_unpack(EX_CONSTRUCT, write_bytes),
self._fb_unpack(EX_CONSTRUCT, write_bytes))
for _ in xrange(n))
else:
ret = {}
for _ in xrange(n):
key = self._fb_unpack(EX_CONSTRUCT, write_bytes)
ret[key] = self._fb_unpack(EX_CONSTRUCT, write_bytes)
if self._object_hook is not None:
ret = self._object_hook(ret)
return ret
if execute == EX_SKIP:
return
if typ == TYPE_RAW:
if self._encoding is not None:
obj = obj.decode(self._encoding, self._unicode_errors)
return obj
if typ == TYPE_EXT:
return self._ext_hook(n, obj)
if typ == TYPE_BIN:
return obj
assert typ == TYPE_IMMEDIATE
return obj
def next(self):
try:
ret = self._fb_unpack(EX_CONSTRUCT, None)
self._fb_consume()
return ret
except OutOfData:
raise StopIteration
__next__ = next
def skip(self, write_bytes=None):
self._fb_unpack(EX_SKIP, write_bytes)
self._fb_consume()
def unpack(self, write_bytes=None):
ret = self._fb_unpack(EX_CONSTRUCT, write_bytes)
self._fb_consume()
return ret
def read_array_header(self, write_bytes=None):
ret = self._fb_unpack(EX_READ_ARRAY_HEADER, write_bytes)
self._fb_consume()
return ret
def read_map_header(self, write_bytes=None):
ret = self._fb_unpack(EX_READ_MAP_HEADER, write_bytes)
self._fb_consume()
return ret
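# Added usage sketch (a rough illustration; it assumes the feed()/iterator
# helpers defined earlier in this module):
#   unpacker = Unpacker()
#   unpacker.feed(raw_bytes)      # buffer incoming msgpack data
#   for obj in unpacker:          # next()/__next__ above yields each object
#       handle(obj)               # handle() is a hypothetical callback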
class Packer(object):
"""
MessagePack Packer
usage:
packer = Packer()
astream.write(packer.pack(a))
astream.write(packer.pack(b))
Packer's constructor has some keyword arguments:
:param callable default:
Convert user type to builtin type that Packer supports.
See also simplejson's documentation.
:param str encoding:
Convert unicode to bytes with this encoding. (default: 'utf-8')
:param str unicode_errors:
Error handler for encoding unicode. (default: 'strict')
:param bool use_single_float:
Use single precision float type for float. (default: False)
:param bool autoreset:
Reset buffer after each pack and return its content as `bytes` (default: True).
If set to false, use `bytes()` to get content and `.reset()` to clear buffer.
:param bool use_bin_type:
Use bin type introduced in msgpack spec 2.0 for bytes.
It also enables str8 type for unicode.
"""
def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
use_single_float=False, autoreset=True, use_bin_type=False):
self._use_float = use_single_float
self._autoreset = autoreset
self._use_bin_type = use_bin_type
self._encoding = encoding
self._unicode_errors = unicode_errors
self._buffer = StringIO()
if default is not None:
if not callable(default):
raise TypeError("default must be callable")
self._default = default
def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance):
default_used = False
while True:
if nest_limit < 0:
raise PackValueError("recursion limit exceeded")
if obj is None:
return self._buffer.write(b"\xc0")
if isinstance(obj, bool):
if obj:
return self._buffer.write(b"\xc3")
return self._buffer.write(b"\xc2")
if isinstance(obj, int_types):
if 0 <= obj < 0x80:
return self._buffer.write(struct.pack("B", obj))
if -0x20 <= obj < 0:
return self._buffer.write(struct.pack("b", obj))
if 0x80 <= obj <= 0xff:
return self._buffer.write(struct.pack("BB", 0xcc, obj))
if -0x80 <= obj < 0:
return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
if 0xff < obj <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xcd, obj))
if -0x8000 <= obj < -0x80:
return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
if 0xffff < obj <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xce, obj))
if -0x80000000 <= obj < -0x8000:
return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
if 0xffffffff < obj <= 0xffffffffffffffff:
return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
if -0x8000000000000000 <= obj < -0x80000000:
return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
raise PackValueError("Integer value out of range")
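# Added note (illustrative): the branches above choose the smallest integer
# format that fits, e.g. 200 packs as b"\xcc\xc8" (uint8) and -200 packs as
# b"\xd1\xff\x38" (int16).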
if self._use_bin_type and isinstance(obj, bytes):
n = len(obj)
if n <= 0xff:
self._buffer.write(struct.pack('>BB', 0xc4, n))
elif n <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xc5, n))
elif n <= 0xffffffff:
self._buffer.write(struct.pack(">BI", 0xc6, n))
else:
raise PackValueError("Bytes is too large")
return self._buffer.write(obj)
if isinstance(obj, (Unicode, bytes)):
if isinstance(obj, Unicode):
if self._encoding is None:
raise TypeError(
"Can't encode unicode string: "
"no encoding is specified")
obj = obj.encode(self._encoding, self._unicode_errors)
n = len(obj)
if n <= 0x1f:
self._buffer.write(struct.pack('B', 0xa0 + n))
elif self._use_bin_type and n <= 0xff:
self._buffer.write(struct.pack('>BB', 0xd9, n))
elif n <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xda, n))
elif n <= 0xffffffff:
self._buffer.write(struct.pack(">BI", 0xdb, n))
else:
raise PackValueError("String is too large")
return self._buffer.write(obj)
if isinstance(obj, float):
if self._use_float:
return self._buffer.write(struct.pack(">Bf", 0xca, obj))
return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
if isinstance(obj, ExtType):
code = obj.code
data = obj.data
assert isinstance(code, int)
assert isinstance(data, bytes)
L = len(data)
if L == 1:
self._buffer.write(b'\xd4')
elif L == 2:
self._buffer.write(b'\xd5')
elif L == 4:
self._buffer.write(b'\xd6')
elif L == 8:
self._buffer.write(b'\xd7')
elif L == 16:
self._buffer.write(b'\xd8')
elif L <= 0xff:
self._buffer.write(struct.pack(">BB", 0xc7, L))
elif L <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xc8, L))
else:
self._buffer.write(struct.pack(">BI", 0xc9, L))
self._buffer.write(struct.pack("b", code))
self._buffer.write(data)
return
if isinstance(obj, (list, tuple)):
n = len(obj)
self._fb_pack_array_header(n)
for i in xrange(n):
self._pack(obj[i], nest_limit - 1)
return
if isinstance(obj, dict):
return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj),
nest_limit - 1)
if not default_used and self._default is not None:
obj = self._default(obj)
default_used = True
continue
raise TypeError("Cannot serialize %r" % obj)
def pack(self, obj):
self._pack(obj)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_map_pairs(self, pairs):
self._fb_pack_map_pairs(len(pairs), pairs)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_array_header(self, n):
if n >= 2**32:
raise ValueError
self._fb_pack_array_header(n)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_map_header(self, n):
if n >= 2**32:
raise ValueError
self._fb_pack_map_header(n)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_ext_type(self, typecode, data):
if not isinstance(typecode, int):
raise TypeError("typecode must have int type.")
if not 0 <= typecode <= 127:
raise ValueError("typecode should be 0-127")
if not isinstance(data, bytes):
raise TypeError("data must have bytes type")
L = len(data)
if L > 0xffffffff:
raise ValueError("Too large data")
if L == 1:
self._buffer.write(b'\xd4')
elif L == 2:
self._buffer.write(b'\xd5')
elif L == 4:
self._buffer.write(b'\xd6')
elif L == 8:
self._buffer.write(b'\xd7')
elif L == 16:
self._buffer.write(b'\xd8')
elif L <= 0xff:
self._buffer.write(b'\xc7' + struct.pack('B', L))
elif L <= 0xffff:
self._buffer.write(b'\xc8' + struct.pack('>H', L))
else:
self._buffer.write(b'\xc9' + struct.pack('>I', L))
self._buffer.write(struct.pack('B', typecode))
self._buffer.write(data)
def _fb_pack_array_header(self, n):
if n <= 0x0f:
return self._buffer.write(struct.pack('B', 0x90 + n))
if n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xdc, n))
if n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xdd, n))
raise PackValueError("Array is too large")
def _fb_pack_map_header(self, n):
if n <= 0x0f:
return self._buffer.write(struct.pack('B', 0x80 + n))
if n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xde, n))
if n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xdf, n))
raise PackValueError("Dict is too large")
def _fb_pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
self._fb_pack_map_header(n)
for (k, v) in pairs:
self._pack(k, nest_limit - 1)
self._pack(v, nest_limit - 1)
def bytes(self):
return self._buffer.getvalue()
def reset(self):
self._buffer = StringIO()
| three.js-master | utils/converters/msgpack/msgpack/fallback.py |
# @author zfedoran / http://github.com/zfedoran
import os
import sys
import math
import operator
import re
import json
import types
import shutil
# #####################################################
# Globals
# #####################################################
option_triangulate = True
option_textures = True
option_prefix = True
option_geometry = False
option_default_camera = False
option_default_light = False
option_pretty_print = False
converter = None
inputFolder = ""
outputFolder = ""
# #####################################################
# Pretty Printing Hacks
# #####################################################
# Force an array to be printed fully on a single line
class NoIndent(object):
def __init__(self, value, separator = ','):
self.separator = separator
self.value = value
def encode(self):
if not self.value:
return None
return '[ %s ]' % self.separator.join(str(f) for f in self.value)
# Force an array into chunks rather than printing each element on a new line
class ChunkedIndent(object):
def __init__(self, value, chunk_size = 15, force_rounding = False):
self.value = value
self.size = chunk_size
self.force_rounding = force_rounding
def encode(self):
# Turn the flat array into an array of arrays where each subarray is of
# length chunk_size. Then string concat the values in the chunked
# arrays, delimited with a ', ' and round the values finally append
# '{CHUNK}' so that we can find the strings with regex later
if not self.value:
return None
if self.force_rounding:
return ['{CHUNK}%s' % ', '.join(str(round(f, 6)) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
else:
return ['{CHUNK}%s' % ', '.join(str(f) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
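# Added examples (for clarity): NoIndent([1, 2, 3]).encode() returns '[ 1,2,3 ]',
# while ChunkedIndent([0, 1, 2, 3], 2).encode() returns
# ['{CHUNK}0, 1', '{CHUNK}2, 3'].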
# This custom encoder looks for instances of NoIndent or ChunkedIndent.
# When it finds one, it uses that object's encode() method instead of the
# default JSON serialization.
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, NoIndent) or isinstance(obj, ChunkedIndent):
return obj.encode()
else:
return json.JSONEncoder.default(self, obj)
def executeRegexHacks(output_string):
# turn strings of arrays into arrays (remove the double quotes)
output_string = re.sub(r':\s*\"(\[.*\])\"', r': \1', output_string)
output_string = re.sub(r'(\n\s*)\"(\[.*\])\"', r'\1\2', output_string)
output_string = re.sub(r'(\n\s*)\"{CHUNK}(.*)\"', r'\1\2', output_string)
# replace '0metadata' with metadata
output_string = re.sub('0metadata', r'metadata', output_string)
# replace 'zchildren' with children
output_string = re.sub('zchildren', r'children', output_string)
# add an extra newline after '"children": {'
output_string = re.sub(r'(children.*{\s*\n)', r'\1\n', output_string)
# add an extra newline after '},'
output_string = re.sub(r'},\s*\n', r'},\n\n', output_string)
# add an extra newline after '\n\s*],'
output_string = re.sub(r'(\n\s*)],\s*\n', r'\1],\n\n', output_string)
return output_string
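# Added sketch (an assumption about intended use, not code from this file):
# the encoder and the regex pass are combined when pretty printing, roughly:
#   text = json.dumps(data, indent=4, cls=CustomEncoder, sort_keys=True)
#   text = executeRegexHacks(text)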
# #####################################################
# Object Serializers
# #####################################################
# FbxVector2 is not JSON serializable
def serializeVector2(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5))
if option_pretty_print:
return NoIndent([v[0], v[1]], ', ')
else:
return [v[0], v[1]]
# FbxVector3 is not JSON serializable
def serializeVector3(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if math.isnan(v[2]) or math.isinf(v[2]):
v[2] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5), round(v[2], 5))
if option_pretty_print:
return NoIndent([v[0], v[1], v[2]], ', ')
else:
return [v[0], v[1], v[2]]
# FbxVector4 is not JSON serializable
def serializeVector4(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if math.isnan(v[2]) or math.isinf(v[2]):
v[2] = 0
if math.isnan(v[3]) or math.isinf(v[3]):
v[3] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5), round(v[2], 5), round(v[3], 5))
if option_pretty_print:
return NoIndent([v[0], v[1], v[2], v[3]], ', ')
else:
return [v[0], v[1], v[2], v[3]]
# #####################################################
# Helpers
# #####################################################
def getRadians(v):
return ((v[0]*math.pi)/180, (v[1]*math.pi)/180, (v[2]*math.pi)/180)
def getHex(c):
color = (int(c[0]*255) << 16) + (int(c[1]*255) << 8) + int(c[2]*255)
return int(color)
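# Added example: getHex((1.0, 0.5, 0.0)) returns 16744192 (0xFF7F00).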
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
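# Added examples: setBit(0, 3, True) returns 8; setBit(15, 0, False) returns 14.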
def generate_uvs(uv_layers):
layers = []
for uvs in uv_layers:
tmp = []
for uv in uvs:
tmp.append(uv[0])
tmp.append(uv[1])
if option_pretty_print:
layer = ChunkedIndent(tmp)
else:
layer = tmp
layers.append(layer)
return layers
# #####################################################
# Object Name Helpers
# #####################################################
def hasUniqueName(o, class_id):
scene = o.GetScene()
object_name = o.GetName()
object_id = o.GetUniqueID()
object_count = scene.GetSrcObjectCount(class_id)
for i in range(object_count):
other = scene.GetSrcObject(class_id, i)
other_id = other.GetUniqueID()
other_name = other.GetName()
if other_id == object_id:
continue
if other_name == object_name:
return False
return True
def getObjectName(o, force_prefix = False):
if not o:
return ""
object_name = o.GetName()
object_id = o.GetUniqueID()
if not force_prefix:
force_prefix = not hasUniqueName(o, FbxNode.ClassId)
prefix = ""
if option_prefix or force_prefix:
prefix = "Object_%s_" % object_id
return prefix + object_name
def getMaterialName(o, force_prefix = False):
object_name = o.GetName()
object_id = o.GetUniqueID()
if not force_prefix:
force_prefix = not hasUniqueName(o, FbxSurfaceMaterial.ClassId)
prefix = ""
if option_prefix or force_prefix:
prefix = "Material_%s_" % object_id
return prefix + object_name
def getTextureName(t, force_prefix = False):
if type(t) is FbxFileTexture:
texture_file = t.GetFileName()
texture_id = os.path.splitext(os.path.basename(texture_file))[0]
else:
texture_id = t.GetName()
if texture_id == "_empty_":
texture_id = ""
prefix = ""
if option_prefix or force_prefix:
prefix = "Texture_%s_" % t.GetUniqueID()
if len(texture_id) == 0:
prefix = prefix[0:len(prefix)-1]
return prefix + texture_id
def getMtlTextureName(texture_name, texture_id, force_prefix = False):
texture_name = os.path.splitext(texture_name)[0]
prefix = ""
if option_prefix or force_prefix:
prefix = "Texture_%s_" % texture_id
return prefix + texture_name
def getPrefixedName(o, prefix):
return (prefix + '_%s_') % o.GetUniqueID() + o.GetName()
# #####################################################
# Triangulation
# #####################################################
def triangulate_node_hierarchy(node):
node_attribute = node.GetNodeAttribute();
if node_attribute:
if node_attribute.GetAttributeType() == FbxNodeAttribute.eMesh or \
node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbs or \
node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbsSurface or \
node_attribute.GetAttributeType() == FbxNodeAttribute.ePatch:
converter.TriangulateInPlace(node);
child_count = node.GetChildCount()
for i in range(child_count):
triangulate_node_hierarchy(node.GetChild(i))
def triangulate_scene(scene):
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
triangulate_node_hierarchy(node.GetChild(i))
# #####################################################
# Generate Material Object
# #####################################################
def generate_texture_bindings(material_property, material_params):
# FBX to Three.js texture types
binding_types = {
"DiffuseColor": "map",
"DiffuseFactor": "diffuseFactor",
"EmissiveColor": "emissiveMap",
"EmissiveFactor": "emissiveFactor",
"AmbientColor": "ambientMap",
"AmbientFactor": "ambientFactor",
"SpecularColor": "specularMap",
"SpecularFactor": "specularFactor",
"ShininessExponent": "shininessExponent",
"NormalMap": "normalMap",
"Bump": "bumpMap",
"TransparentColor": "transparentMap",
"TransparencyFactor": "transparentFactor",
"ReflectionColor": "reflectionMap",
"ReflectionFactor": "reflectionFactor",
"DisplacementColor": "displacementMap",
"VectorDisplacementColor": "vectorDisplacementMap"
}
if material_property.IsValid():
#Here we have to check if it's layered textures, or just textures:
layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
if layered_texture_count > 0:
for j in range(layered_texture_count):
layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
for k in range(texture_count):
texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
if texture:
texture_id = getTextureName(texture, True)
material_params[binding_types[str(material_property.GetName())]] = texture_id
else:
# no layered texture; simply get the textures on the property
texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
for j in range(texture_count):
texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
if texture:
texture_id = getTextureName(texture, True)
material_params[binding_types[str(material_property.GetName())]] = texture_id
def generate_material_object(material):
#Get the implementation to see if it's a hardware shader.
implementation = GetImplementation(material, "ImplementationHLSL")
implementation_type = "HLSL"
if not implementation:
implementation = GetImplementation(material, "ImplementationCGFX")
implementation_type = "CGFX"
output = None
material_params = None
material_type = None
if implementation:
print("Shader materials are not supported")
elif material.GetClassId().Is(FbxSurfaceLambert.ClassId):
ambient = getHex(material.Ambient.Get())
diffuse = getHex(material.Diffuse.Get())
emissive = getHex(material.Emissive.Get())
opacity = 1.0 - material.TransparencyFactor.Get()
opacity = 1.0 if opacity == 0 else opacity
transparent = False
reflectivity = 1
material_type = 'MeshLambertMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
elif material.GetClassId().Is(FbxSurfacePhong.ClassId):
ambient = getHex(material.Ambient.Get())
diffuse = getHex(material.Diffuse.Get())
emissive = getHex(material.Emissive.Get())
specular = getHex(material.Specular.Get())
opacity = 1.0 - material.TransparencyFactor.Get()
opacity = 1.0 if opacity == 0 else opacity
shininess = material.Shininess.Get()
transparent = False
reflectivity = 1
bumpScale = 1
material_type = 'MeshPhongMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'specular' : specular,
'shininess' : shininess,
'bumpScale' : bumpScale,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
else:
print("Unknown type of Material", getMaterialName(material))
# default to Lambert Material if the current Material type cannot be handled
if not material_type:
ambient = getHex((0,0,0))
diffuse = getHex((0.5,0.5,0.5))
emissive = getHex((0,0,0))
opacity = 1
transparent = False
reflectivity = 1
material_type = 'MeshLambertMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
if option_textures:
texture_count = FbxLayerElement.sTypeTextureCount()
for texture_index in range(texture_count):
material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
generate_texture_bindings(material_property, material_params)
material_params['wireframe'] = False
material_params['wireframeLinewidth'] = 1
output = {
'type' : material_type,
'parameters' : material_params
}
return output
def generate_proxy_material_object(node, material_names):
material_type = 'MeshFaceMaterial'
material_params = {
'materials' : material_names
}
output = {
'type' : material_type,
'parameters' : material_params
}
return output
# #####################################################
# Find Scene Materials
# #####################################################
def extract_materials_from_node(node, material_dict):
name = node.GetName()
mesh = node.GetNodeAttribute()
node = None
if mesh:
node = mesh.GetNode()
if node:
material_count = node.GetMaterialCount()
material_names = []
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
material_names.append(getMaterialName(material))
if material_count > 1:
proxy_material = generate_proxy_material_object(node, material_names)
proxy_name = getMaterialName(node, True)
material_dict[proxy_name] = proxy_material
def generate_materials_from_hierarchy(node, material_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
extract_materials_from_node(node, material_dict)
for i in range(node.GetChildCount()):
generate_materials_from_hierarchy(node.GetChild(i), material_dict)
def generate_material_dict(scene):
material_dict = {}
# generate all materials for this scene
material_count = scene.GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
for i in range(material_count):
material = scene.GetSrcObject(FbxSurfaceMaterial.ClassId, i)
material_object = generate_material_object(material)
material_name = getMaterialName(material)
material_dict[material_name] = material_object
# generate material proxies
# Three.js does not support meshes with multiple materials, however it does
# support materials with multiple submaterials
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_materials_from_hierarchy(node.GetChild(i), material_dict)
return material_dict
# #####################################################
# Generate Texture Object
# #####################################################
def generate_texture_object(texture):
#TODO: extract more texture properties
wrap_u = texture.GetWrapModeU()
wrap_v = texture.GetWrapModeV()
offset = texture.GetUVTranslation()
if type(texture) is FbxFileTexture:
url = texture.GetFileName()
else:
url = getTextureName( texture )
url = replace_inFolder2OutFolder( url )
output = {
'url': url,
'repeat': serializeVector2( (1,1) ),
'offset': serializeVector2( texture.GetUVTranslation() ),
'magFilter': 'LinearFilter',
'minFilter': 'LinearMipMapLinearFilter',
'anisotropy': True
}
return output
# #####################################################
# Replace Texture input path to output
# #####################################################
def replace_inFolder2OutFolder(url):
folderIndex = url.find(inputFolder)
if folderIndex != -1:
url = url[ folderIndex+len(inputFolder): ]
url = outputFolder + url
return url
# #####################################################
# Replace Texture output path to input
# #####################################################
def replace_OutFolder2inFolder(url):
folderIndex = url.find(outputFolder)
if folderIndex != -1:
url = url[ folderIndex+len(outputFolder): ]
url = inputFolder + url
return url
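# Added example (with hypothetical folder values): if inputFolder is "models/src/"
# and outputFolder is "models/out/", then
# replace_inFolder2OutFolder("models/src/tex/wood.jpg") returns "models/out/tex/wood.jpg".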
# #####################################################
# Find Scene Textures
# #####################################################
def extract_material_textures(material_property, texture_dict):
if material_property.IsValid():
#Here we have to check if it's layered textures, or just textures:
layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
if layered_texture_count > 0:
for j in range(layered_texture_count):
layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
for k in range(texture_count):
texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
if texture:
texture_object = generate_texture_object(texture)
texture_name = getTextureName( texture, True )
texture_dict[texture_name] = texture_object
else:
# no layered texture; simply get the textures on the property
texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
for j in range(texture_count):
texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
if texture:
texture_object = generate_texture_object(texture)
texture_name = getTextureName( texture, True )
texture_dict[texture_name] = texture_object
def extract_textures_from_node(node, texture_dict):
name = node.GetName()
mesh = node.GetNodeAttribute()
#for all materials attached to this mesh
material_count = mesh.GetNode().GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
for material_index in range(material_count):
material = mesh.GetNode().GetSrcObject(FbxSurfaceMaterial.ClassId, material_index)
#go through all the possible textures types
if material:
texture_count = FbxLayerElement.sTypeTextureCount()
for texture_index in range(texture_count):
material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
extract_material_textures(material_property, texture_dict)
def generate_textures_from_hierarchy(node, texture_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
extract_textures_from_node(node, texture_dict)
for i in range(node.GetChildCount()):
generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
def generate_texture_dict(scene):
if not option_textures:
return {}
texture_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
return texture_dict
# #####################################################
# Extract Fbx SDK Mesh Data
# #####################################################
def extract_fbx_vertex_positions(mesh):
control_points_count = mesh.GetControlPointsCount()
control_points = mesh.GetControlPoints()
positions = []
for i in range(control_points_count):
tmp = control_points[i]
tmp = [tmp[0], tmp[1], tmp[2]]
positions.append(tmp)
node = mesh.GetNode()
if node:
t = node.GeometricTranslation.Get()
t = FbxVector4(t[0], t[1], t[2], 1)
r = node.GeometricRotation.Get()
r = FbxVector4(r[0], r[1], r[2], 1)
s = node.GeometricScaling.Get()
s = FbxVector4(s[0], s[1], s[2], 1)
hasGeometricTransform = False
if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
r[0] != 0 or r[1] != 0 or r[2] != 0 or \
s[0] != 1 or s[1] != 1 or s[2] != 1:
hasGeometricTransform = True
if hasGeometricTransform:
geo_transform = FbxMatrix(t,r,s)
else:
geo_transform = FbxMatrix()
transform = None
if option_geometry:
# FbxMeshes are local to their node, we need the vertices in global space
# when scene nodes are not exported
transform = node.EvaluateGlobalTransform()
transform = FbxMatrix(transform) * geo_transform
elif hasGeometricTransform:
transform = geo_transform
if transform:
for i in range(len(positions)):
v = positions[i]
position = FbxVector4(v[0], v[1], v[2])
position = transform.MultNormalize(position)
positions[i] = [position[0], position[1], position[2]]
return positions
def extract_fbx_vertex_normals(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_normal_indices = []
layered_normal_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_normals = mesh.GetLayer(l).GetNormals()
if not mesh_normals:
continue
normals_array = mesh_normals.GetDirectArray()
normals_count = normals_array.GetCount()
if normals_count == 0:
continue
normal_indices = []
normal_values = []
# values
for i in range(normals_count):
normal = normals_array.GetAt(i)
normal = [normal[0], normal[1], normal[2]]
normal_values.append(normal)
node = mesh.GetNode()
if node:
t = node.GeometricTranslation.Get()
t = FbxVector4(t[0], t[1], t[2], 1)
r = node.GeometricRotation.Get()
r = FbxVector4(r[0], r[1], r[2], 1)
s = node.GeometricScaling.Get()
s = FbxVector4(s[0], s[1], s[2], 1)
hasGeometricTransform = False
if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
r[0] != 0 or r[1] != 0 or r[2] != 0 or \
s[0] != 1 or s[1] != 1 or s[2] != 1:
hasGeometricTransform = True
if hasGeometricTransform:
geo_transform = FbxMatrix(t,r,s)
else:
geo_transform = FbxMatrix()
transform = None
if option_geometry:
# FbxMeshes are local to their node, we need the vertices in global space
# when scene nodes are not exported
transform = node.EvaluateGlobalTransform()
transform = FbxMatrix(transform) * geo_transform
elif hasGeometricTransform:
transform = geo_transform
if transform:
t = FbxVector4(0,0,0,1)
transform.SetRow(3, t)
for i in range(len(normal_values)):
n = normal_values[i]
normal = FbxVector4(n[0], n[1], n[2])
normal = transform.MultNormalize(normal)
normal.Normalize()
normal = [normal[0], normal[1], normal[2]]
normal_values[i] = normal
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_normals = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
# mapping mode is by control points. The mesh should be smooth and soft.
# we can get normals by retrieving each control point
if mesh_normals.GetMappingMode() == FbxLayerElement.eByControlPoint:
# reference mode is direct, the normal index is same as vertex index.
# get normals by the index of control vertex
if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
poly_normals.append(control_point_index)
elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_normals.GetIndexArray().GetAt(control_point_index)
poly_normals.append(index)
# mapping mode is by polygon-vertex.
# we can get normals by retrieving polygon-vertex.
elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
poly_normals.append(vertexId)
elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_normals.GetIndexArray().GetAt(vertexId)
poly_normals.append(index)
elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_normals.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_normals.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported normal mapping mode for polygon vertex")
vertexId += 1
normal_indices.append(poly_normals)
layered_normal_values.append(normal_values)
layered_normal_indices.append(normal_indices)
normal_values = []
normal_indices = []
# Three.js only supports one layer of normals
if len(layered_normal_values) > 0:
normal_values = layered_normal_values[0]
normal_indices = layered_normal_indices[0]
return normal_values, normal_indices
def extract_fbx_vertex_colors(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_color_indices = []
layered_color_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_colors = mesh.GetLayer(l).GetVertexColors()
if not mesh_colors:
continue
colors_array = mesh_colors.GetDirectArray()
colors_count = colors_array.GetCount()
if colors_count == 0:
continue
color_indices = []
color_values = []
# values
for i in range(colors_count):
color = colors_array.GetAt(i)
color = [color.mRed, color.mGreen, color.mBlue, color.mAlpha]
color_values.append(color)
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_colors = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
if mesh_colors.GetMappingMode() == FbxLayerElement.eByControlPoint:
if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
poly_colors.append(control_point_index)
elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_colors.GetIndexArray().GetAt(control_point_index)
poly_colors.append(index)
elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
poly_colors.append(vertexId)
elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_colors.GetIndexArray().GetAt(vertexId)
poly_colors.append(index)
elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_colors.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_colors.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported color mapping mode for polygon vertex")
vertexId += 1
color_indices.append(poly_colors)
layered_color_indices.append( color_indices )
layered_color_values.append( color_values )
color_values = []
color_indices = []
# Three.js only supports one layer of colors
if len(layered_color_values) > 0:
color_values = layered_color_values[0]
color_indices = layered_color_indices[0]
'''
# The Fbx SDK defaults mesh.Color to (0.8, 0.8, 0.8)
# This causes most models to receive incorrect vertex colors
if len(color_values) == 0:
color = mesh.Color.Get()
color_values = [[color[0], color[1], color[2]]]
color_indices = []
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
color_indices.append([0] * poly_size)
'''
return color_values, color_indices
def extract_fbx_vertex_uvs(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_uv_indices = []
layered_uv_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_uvs = mesh.GetLayer(l).GetUVs()
if not mesh_uvs:
continue
uvs_array = mesh_uvs.GetDirectArray()
uvs_count = uvs_array.GetCount()
if uvs_count == 0:
continue
uv_indices = []
uv_values = []
# values
for i in range(uvs_count):
uv = uvs_array.GetAt(i)
uv = [uv[0], uv[1]]
uv_values.append(uv)
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_uvs = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
if mesh_uvs.GetMappingMode() == FbxLayerElement.eByControlPoint:
if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect:
poly_uvs.append(control_point_index)
elif mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_uvs.GetIndexArray().GetAt(control_point_index)
poly_uvs.append(index)
elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
uv_texture_index = mesh_uvs.GetIndexArray().GetAt(vertexId)
if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect or \
mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
poly_uvs.append(uv_texture_index)
elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_uvs.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_uvs.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported uv mapping mode for polygon vertex")
vertexId += 1
uv_indices.append(poly_uvs)
layered_uv_values.append(uv_values)
layered_uv_indices.append(uv_indices)
return layered_uv_values, layered_uv_indices
# #####################################################
# Process Mesh Geometry
# #####################################################
def generate_normal_key(normal):
return (round(normal[0], 6), round(normal[1], 6), round(normal[2], 6))
def generate_color_key(color):
return getHex(color)
def generate_uv_key(uv):
return (round(uv[0], 6), round(uv[1], 6))
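# Added note: rounding to 6 decimals makes near-identical values share one
# dictionary key, e.g. generate_uv_key([0.1234567, 1.0]) == (0.123457, 1.0).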
def append_non_duplicate_uvs(source_uvs, dest_uvs, counts):
source_layer_count = len(source_uvs)
for layer_index in range(source_layer_count):
dest_layer_count = len(dest_uvs)
if dest_layer_count <= layer_index:
dest_uv_layer = {}
count = 0
dest_uvs.append(dest_uv_layer)
counts.append(count)
else:
dest_uv_layer = dest_uvs[layer_index]
count = counts[layer_index]
source_uv_layer = source_uvs[layer_index]
for uv in source_uv_layer:
key = generate_uv_key(uv)
if key not in dest_uv_layer:
dest_uv_layer[key] = count
count += 1
counts[layer_index] = count
return counts
def generate_unique_normals_dictionary(mesh_list):
normals_dictionary = {}
nnormals = 0
# Merge meshes, remove duplicate data
for mesh in mesh_list:
node = mesh.GetNode()
normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
if len(normal_values) > 0:
for normal in normal_values:
key = generate_normal_key(normal)
if key not in normals_dictionary:
normals_dictionary[key] = nnormals
nnormals += 1
return normals_dictionary
def generate_unique_colors_dictionary(mesh_list):
colors_dictionary = {}
ncolors = 0
# Merge meshes, remove duplicate data
for mesh in mesh_list:
color_values, color_indices = extract_fbx_vertex_colors(mesh)
if len(color_values) > 0:
for color in color_values:
key = generate_color_key(color)
if key not in colors_dictionary:
colors_dictionary[key] = ncolors
ncolors += 1
return colors_dictionary
def generate_unique_uvs_dictionary_layers(mesh_list):
uvs_dictionary_layers = []
nuvs_list = []
# Merge meshes, remove duplicate data
for mesh in mesh_list:
uv_values, uv_indices = extract_fbx_vertex_uvs(mesh)
if len(uv_values) > 0:
nuvs_list = append_non_duplicate_uvs(uv_values, uvs_dictionary_layers, nuvs_list)
return uvs_dictionary_layers
def generate_normals_from_dictionary(normals_dictionary):
normal_values = []
for key, index in sorted(normals_dictionary.items(), key = operator.itemgetter(1)):
normal_values.append(key)
return normal_values
def generate_colors_from_dictionary(colors_dictionary):
color_values = []
for key, index in sorted(colors_dictionary.items(), key = operator.itemgetter(1)):
color_values.append(key)
return color_values
def generate_uvs_from_dictionary_layers(uvs_dictionary_layers):
uv_values = []
for uvs_dictionary in uvs_dictionary_layers:
uv_values_layer = []
for key, index in sorted(uvs_dictionary.items(), key = operator.itemgetter(1)):
uv_values_layer.append(key)
uv_values.append(uv_values_layer)
return uv_values
def generate_normal_indices_for_poly(poly_index, mesh_normal_values, mesh_normal_indices, normals_to_indices):
if len(mesh_normal_indices) <= 0:
return []
poly_normal_indices = mesh_normal_indices[poly_index]
poly_size = len(poly_normal_indices)
output_poly_normal_indices = []
for v in range(poly_size):
normal_index = poly_normal_indices[v]
normal_value = mesh_normal_values[normal_index]
key = generate_normal_key(normal_value)
output_index = normals_to_indices[key]
output_poly_normal_indices.append(output_index)
return output_poly_normal_indices
def generate_color_indices_for_poly(poly_index, mesh_color_values, mesh_color_indices, colors_to_indices):
if len(mesh_color_indices) <= 0:
return []
poly_color_indices = mesh_color_indices[poly_index]
poly_size = len(poly_color_indices)
output_poly_color_indices = []
for v in range(poly_size):
color_index = poly_color_indices[v]
color_value = mesh_color_values[color_index]
key = generate_color_key(color_value)
output_index = colors_to_indices[key]
output_poly_color_indices.append(output_index)
return output_poly_color_indices
def generate_uv_indices_for_poly(poly_index, mesh_uv_values, mesh_uv_indices, uvs_to_indices):
if len(mesh_uv_indices) <= 0:
return []
poly_uv_indices = mesh_uv_indices[poly_index]
poly_size = len(poly_uv_indices)
output_poly_uv_indices = []
for v in range(poly_size):
uv_index = poly_uv_indices[v]
uv_value = mesh_uv_values[uv_index]
key = generate_uv_key(uv_value)
output_index = uvs_to_indices[key]
output_poly_uv_indices.append(output_index)
return output_poly_uv_indices
def process_mesh_vertices(mesh_list):
vertex_offset = 0
vertex_offset_list = [0]
vertices = []
for mesh in mesh_list:
node = mesh.GetNode()
mesh_vertices = extract_fbx_vertex_positions(mesh)
vertices.extend(mesh_vertices[:])
vertex_offset += len(mesh_vertices)
vertex_offset_list.append(vertex_offset)
return vertices, vertex_offset_list
def process_mesh_materials(mesh_list):
material_offset = 0
material_offset_list = [0]
materials_list = []
#TODO: remove duplicate mesh references
for mesh in mesh_list:
node = mesh.GetNode()
material_count = node.GetMaterialCount()
if material_count > 0:
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
materials_list.append( material )
material_offset += material_count
material_offset_list.append(material_offset)
return materials_list, material_offset_list
def process_mesh_polygons(mesh_list, normals_to_indices, colors_to_indices, uvs_to_indices_list, vertex_offset_list, material_offset_list):
faces = []
for mesh_index in range(len(mesh_list)):
mesh = mesh_list[mesh_index]
flipWindingOrder = False
node = mesh.GetNode()
if node:
local_scale = node.EvaluateLocalScaling()
if local_scale[0] < 0 or local_scale[1] < 0 or local_scale[2] < 0:
flipWindingOrder = True
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
color_values, color_indices = extract_fbx_vertex_colors(mesh)
uv_values_layers, uv_indices_layers = extract_fbx_vertex_uvs(mesh)
for poly_index in range(poly_count):
poly_size = mesh.GetPolygonSize(poly_index)
face_normals = generate_normal_indices_for_poly(poly_index, normal_values, normal_indices, normals_to_indices)
face_colors = generate_color_indices_for_poly(poly_index, color_values, color_indices, colors_to_indices)
face_uv_layers = []
for l in range(len(uv_indices_layers)):
uv_values = uv_values_layers[l]
uv_indices = uv_indices_layers[l]
face_uv_indices = generate_uv_indices_for_poly(poly_index, uv_values, uv_indices, uvs_to_indices_list[l])
face_uv_layers.append(face_uv_indices)
face_vertices = []
for vertex_index in range(poly_size):
control_point_index = mesh.GetPolygonVertex(poly_index, vertex_index)
face_vertices.append(control_point_index)
#TODO: assign a default material to any mesh without one
if len(material_offset_list) <= mesh_index:
material_offset = 0
else:
material_offset = material_offset_list[mesh_index]
vertex_offset = vertex_offset_list[mesh_index]
if poly_size > 4:
new_face_normals = []
new_face_colors = []
new_face_uv_layers = []
for i in range(poly_size - 2):
new_face_vertices = [face_vertices[0], face_vertices[i+1], face_vertices[i+2]]
if len(face_normals):
new_face_normals = [face_normals[0], face_normals[i+1], face_normals[i+2]]
if len(face_colors):
new_face_colors = [face_colors[0], face_colors[i+1], face_colors[i+2]]
if len(face_uv_layers):
new_face_uv_layers = []
for layer in face_uv_layers:
new_face_uv_layers.append([layer[0], layer[i+1], layer[i+2]])
face = generate_mesh_face(mesh,
poly_index,
new_face_vertices,
new_face_normals,
new_face_colors,
new_face_uv_layers,
vertex_offset,
material_offset,
flipWindingOrder)
faces.append(face)
else:
face = generate_mesh_face(mesh,
poly_index,
face_vertices,
face_normals,
face_colors,
face_uv_layers,
vertex_offset,
material_offset,
flipWindingOrder)
faces.append(face)
return faces
def generate_mesh_face(mesh, polygon_index, vertex_indices, normals, colors, uv_layers, vertex_offset, material_offset, flipOrder):
isTriangle = ( len(vertex_indices) == 3 )
nVertices = 3 if isTriangle else 4
hasMaterial = False
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
hasMaterial = True
break
hasFaceUvs = False
hasFaceVertexUvs = len(uv_layers) > 0
hasFaceNormals = False
hasFaceVertexNormals = len(normals) > 0
hasFaceColors = False
hasFaceVertexColors = len(colors) > 0
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face color index
# face vertex colors indices
faceData.append(faceType)
if flipOrder:
if nVertices == 3:
vertex_indices = [vertex_indices[0], vertex_indices[2], vertex_indices[1]]
if hasFaceVertexNormals:
normals = [normals[0], normals[2], normals[1]]
if hasFaceVertexColors:
colors = [colors[0], colors[2], colors[1]]
if hasFaceVertexUvs:
tmp = []
for polygon_uvs in uv_layers:
tmp.append([polygon_uvs[0], polygon_uvs[2], polygon_uvs[1]])
uv_layers = tmp
else:
vertex_indices = [vertex_indices[0], vertex_indices[3], vertex_indices[2], vertex_indices[1]]
if hasFaceVertexNormals:
normals = [normals[0], normals[3], normals[2], normals[1]]
if hasFaceVertexColors:
colors = [colors[0], colors[3], colors[2], colors[1]]
if hasFaceVertexUvs:
tmp = []
for polygon_uvs in uv_layers:
tmp.append([polygon_uvs[0], polygon_uvs[3], polygon_uvs[2], polygon_uvs[1]])
uv_layers = tmp
for i in range(nVertices):
index = vertex_indices[i] + vertex_offset
faceData.append(index)
if hasMaterial:
material_id = 0
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
material_id = materials.GetIndexArray().GetAt(polygon_index)
break
material_id += material_offset
faceData.append( material_id )
if hasFaceVertexUvs:
for polygon_uvs in uv_layers:
for i in range(nVertices):
index = polygon_uvs[i]
faceData.append(index)
if hasFaceVertexNormals:
for i in range(nVertices):
index = normals[i]
faceData.append(index)
if hasFaceVertexColors:
for i in range(nVertices):
index = colors[i]
faceData.append(index)
return faceData
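# Added example (illustrative): for a triangle with a material and per-vertex
# normals only, faceType is 0b00100010 (34: bit 1 = material, bit 5 = vertex
# normals), so faceData becomes [34, the three vertex indices (plus
# vertex_offset), material_id, the three normal indices].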
# #####################################################
# Generate Mesh Object (for scene output format)
# #####################################################
def generate_scene_output(node):
mesh = node.GetNodeAttribute()
# This is done in order to keep the scene output and non-scene output code DRY
mesh_list = [ mesh ]
# Extract the mesh data into arrays
vertices, vertex_offsets = process_mesh_vertices(mesh_list)
materials, material_offsets = process_mesh_materials(mesh_list)
normals_to_indices = generate_unique_normals_dictionary(mesh_list)
colors_to_indices = generate_unique_colors_dictionary(mesh_list)
uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
normal_values = generate_normals_from_dictionary(normals_to_indices)
color_values = generate_colors_from_dictionary(colors_to_indices)
uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
# Generate mesh faces for the Three.js file format
faces = process_mesh_polygons(mesh_list,
normals_to_indices,
colors_to_indices,
uvs_to_indices_list,
vertex_offsets,
material_offsets)
# Generate counts for uvs, vertices, normals, colors, and faces
nuvs = []
for layer_index, uvs in enumerate(uv_values):
nuvs.append(str(len(uvs)))
nvertices = len(vertices)
nnormals = len(normal_values)
ncolors = len(color_values)
nfaces = len(faces)
# Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
vertices = [val for v in vertices for val in v]
normal_values = [val for n in normal_values for val in n]
color_values = [c for c in color_values]
faces = [val for f in faces for val in f]
uv_values = generate_uvs(uv_values)
# Disable automatic json indenting when pretty printing for the arrays
if option_pretty_print:
nuvs = NoIndent(nuvs)
vertices = ChunkedIndent(vertices, 15, True)
normal_values = ChunkedIndent(normal_values, 15, True)
color_values = ChunkedIndent(color_values, 15)
faces = ChunkedIndent(faces, 30)
metadata = {
'vertices' : nvertices,
'normals' : nnormals,
'colors' : ncolors,
'faces' : nfaces,
'uvs' : nuvs
}
output = {
'scale' : 1,
'materials' : [],
'vertices' : vertices,
'normals' : [] if nnormals <= 0 else normal_values,
'colors' : [] if ncolors <= 0 else color_values,
'uvs' : uv_values,
'faces' : faces
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
# #####################################################
# Generate Mesh Object (for non-scene output)
# #####################################################
def generate_non_scene_output(scene):
mesh_list = generate_mesh_list(scene)
# Extract the mesh data into arrays
vertices, vertex_offsets = process_mesh_vertices(mesh_list)
materials, material_offsets = process_mesh_materials(mesh_list)
normals_to_indices = generate_unique_normals_dictionary(mesh_list)
colors_to_indices = generate_unique_colors_dictionary(mesh_list)
uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
normal_values = generate_normals_from_dictionary(normals_to_indices)
color_values = generate_colors_from_dictionary(colors_to_indices)
uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
# Generate mesh faces for the Three.js file format
faces = process_mesh_polygons(mesh_list,
normals_to_indices,
colors_to_indices,
uvs_to_indices_list,
vertex_offsets,
material_offsets)
# Generate counts for uvs, vertices, normals, colors, and faces
nuvs = []
for layer_index, uvs in enumerate(uv_values):
nuvs.append(str(len(uvs)))
nvertices = len(vertices)
nnormals = len(normal_values)
ncolors = len(color_values)
nfaces = len(faces)
# Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
vertices = [val for v in vertices for val in v]
normal_values = [val for n in normal_values for val in n]
color_values = [c for c in color_values]
faces = [val for f in faces for val in f]
uv_values = generate_uvs(uv_values)
# Disable json indenting when pretty printing for the arrays
if option_pretty_print:
nuvs = NoIndent(nuvs)
vertices = NoIndent(vertices)
normal_values = NoIndent(normal_values)
color_values = NoIndent(color_values)
faces = NoIndent(faces)
metadata = {
'formatVersion' : 3,
'type' : 'geometry',
'generatedBy' : 'convert-to-threejs.py',
'vertices' : nvertices,
'normals' : nnormals,
'colors' : ncolors,
'faces' : nfaces,
'uvs' : nuvs
}
output = {
'scale' : 1,
'materials' : [],
'vertices' : vertices,
'normals' : [] if nnormals <= 0 else normal_values,
'colors' : [] if ncolors <= 0 else color_values,
'uvs' : uv_values,
'faces' : faces
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
def generate_mesh_list_from_hierarchy(node, mesh_list):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh or \
attribute_type == FbxNodeAttribute.eNurbs or \
attribute_type == FbxNodeAttribute.eNurbsSurface or \
attribute_type == FbxNodeAttribute.ePatch:
if attribute_type != FbxNodeAttribute.eMesh:
converter.TriangulateInPlace(node);
mesh_list.append(node.GetNodeAttribute())
for i in range(node.GetChildCount()):
generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
def generate_mesh_list(scene):
mesh_list = []
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
return mesh_list
# #####################################################
# Generate Embed Objects
# #####################################################
def generate_embed_dict_from_hierarchy(node, embed_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh or \
attribute_type == FbxNodeAttribute.eNurbs or \
attribute_type == FbxNodeAttribute.eNurbsSurface or \
attribute_type == FbxNodeAttribute.ePatch:
if attribute_type != FbxNodeAttribute.eMesh:
converter.TriangulateInPlace(node);
embed_object = generate_scene_output(node)
embed_name = getPrefixedName(node, 'Embed')
embed_dict[embed_name] = embed_object
for i in range(node.GetChildCount()):
generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
def generate_embed_dict(scene):
embed_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
return embed_dict
# #####################################################
# Generate Geometry Objects
# #####################################################
def generate_geometry_object(node):
output = {
'type' : 'embedded',
'id' : getPrefixedName( node, 'Embed' )
}
return output
def generate_geometry_dict_from_hierarchy(node, geometry_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
geometry_object = generate_geometry_object(node)
geometry_name = getPrefixedName( node, 'Geometry' )
geometry_dict[geometry_name] = geometry_object
for i in range(node.GetChildCount()):
generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
def generate_geometry_dict(scene):
geometry_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
return geometry_dict
# #####################################################
# Generate Light Node Objects
# #####################################################
def generate_default_light():
direction = (1,1,1)
color = (1,1,1)
intensity = 80.0
output = {
'type': 'DirectionalLight',
'color': getHex(color),
'intensity': intensity/100.00,
'direction': serializeVector3( direction ),
'target': getObjectName( None )
}
return output
def generate_light_object(node):
light = node.GetNodeAttribute()
light_types = ["point", "directional", "spot", "area", "volume"]
light_type = light_types[light.LightType.Get()]
transform = node.EvaluateLocalTransform()
position = transform.GetT()
output = None
if light_type == "directional":
# Three.js directional lights emit light from a point in 3d space to a target node or the origin.
# When there is no target, we need to take a point, one unit away from the origin, and move it
# into the right location so that the origin acts like the target
if node.GetTarget():
direction = position
else:
translation = FbxVector4(0,0,0,0)
scale = FbxVector4(1,1,1,1)
rotation = transform.GetR()
matrix = FbxMatrix(translation, rotation, scale)
direction = matrix.MultNormalize(FbxVector4(0,1,0,1))
output = {
'type': 'DirectionalLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'direction': serializeVector3( direction ),
'target': getObjectName( node.GetTarget() )
}
elif light_type == "point":
output = {
'type': 'PointLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'position': serializeVector3( position ),
'distance': light.FarAttenuationEnd.Get()
}
elif light_type == "spot":
output = {
'type': 'SpotLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'position': serializeVector3( position ),
'distance': light.FarAttenuationEnd.Get(),
'angle': light.OuterAngle.Get()*math.pi/180,
'exponent': light.DecayType.Get(),
'target': getObjectName( node.GetTarget() )
}
return output
def generate_ambient_light(scene):
scene_settings = scene.GetGlobalSettings()
ambient_color = scene_settings.GetAmbientColor()
ambient_color = (ambient_color.mRed, ambient_color.mGreen, ambient_color.mBlue)
if ambient_color[0] == 0 and ambient_color[1] == 0 and ambient_color[2] == 0:
return None
output = {
'type': 'AmbientLight',
'color': getHex(ambient_color)
}
return output
# #####################################################
# Generate Camera Node Objects
# #####################################################
def generate_default_camera():
position = (100, 100, 100)
near = 0.1
far = 1000
fov = 75
output = {
'type': 'PerspectiveCamera',
'fov': fov,
'near': near,
'far': far,
'position': serializeVector3( position )
}
return output
def generate_camera_object(node):
camera = node.GetNodeAttribute()
position = camera.Position.Get()
projection_types = [ "perspective", "orthogonal" ]
projection = projection_types[camera.ProjectionType.Get()]
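# FbxCamera.ProjectionType: 0 = perspective, 1 = orthogonal (matching the list above)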
near = camera.NearPlane.Get()
far = camera.FarPlane.Get()
name = getObjectName( node )
output = {}
if projection == "perspective":
aspect = camera.PixelAspectRatio.Get()
fov = camera.FieldOfView.Get()
output = {
'type': 'PerspectiveCamera',
'fov': fov,
'aspect': aspect,
'near': near,
'far': far,
'position': serializeVector3( position )
}
elif projection == "orthogonal":
left = ""
right = ""
top = ""
bottom = ""
output = {
'type': 'OrthographicCamera',
'left': left,
'right': right,
'top': top,
'bottom': bottom,
'near': near,
'far': far,
'position': serializeVector3( position )
}
return output
# #####################################################
# Generate Camera Names
# #####################################################
def generate_camera_name_list_from_hierarchy(node, camera_list):
if node.GetNodeAttribute() is None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eCamera:
camera_string = getObjectName(node)
camera_list.append(camera_string)
for i in range(node.GetChildCount()):
generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
def generate_camera_name_list(scene):
camera_list = []
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
return camera_list
# #####################################################
# Generate Mesh Node Object
# #####################################################
def generate_mesh_object(node):
mesh = node.GetNodeAttribute()
transform = node.EvaluateLocalTransform()
position = transform.GetT()
scale = transform.GetS()
rotation = getRadians(transform.GetR())
quaternion = transform.GetQ()
material_count = node.GetMaterialCount()
material_name = ""
if material_count > 0:
material_names = []
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
material_names.append( getMaterialName(material) )
if material_count <= 1 and len(material_names) == 0:
material_names.append('')
#If this mesh has more than one material, use a proxy material
material_name = getMaterialName( node, True) if material_count > 1 else material_names[0]
output = {
'geometry': getPrefixedName( node, 'Geometry' ),
'material': material_name,
'position': serializeVector3( position ),
'quaternion': serializeVector4( quaternion ),
'scale': serializeVector3( scale ),
'visible': True,
}
return output
# #####################################################
# Generate Node Object
# #####################################################
def generate_object(node):
node_types = ["Unknown", "Null", "Marker", "Skeleton", "Mesh", "Nurbs", "Patch", "Camera",
"CameraStereo", "CameraSwitcher", "Light", "OpticalReference", "OpticalMarker", "NurbsCurve",
"TrimNurbsSurface", "Boundary", "NurbsSurface", "Shape", "LODGroup", "SubDiv", "CachedEffect", "Line"]
transform = node.EvaluateLocalTransform()
position = transform.GetT()
scale = transform.GetS()
rotation = getRadians(transform.GetR())
quaternion = transform.GetQ()
node_type = ""
if node.GetNodeAttribute() is None:
node_type = "Null"
else:
node_type = node_types[node.GetNodeAttribute().GetAttributeType()]
name = getObjectName( node )
output = {
'fbx_type': node_type,
'position': serializeVector3( position ),
'quaternion': serializeVector4( quaternion ),
'scale': serializeVector3( scale ),
'visible': True
}
return output
# #####################################################
# Parse Scene Node Objects
# #####################################################
def generate_object_hierarchy(node, object_dict):
object_count = 0
if node.GetNodeAttribute() is None:
object_data = generate_object(node)
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
object_data = generate_mesh_object(node)
elif attribute_type == FbxNodeAttribute.eLight:
object_data = generate_light_object(node)
elif attribute_type == FbxNodeAttribute.eCamera:
object_data = generate_camera_object(node)
else:
object_data = generate_object(node)
object_count += 1
object_name = getObjectName(node)
object_children = {}
for i in range(node.GetChildCount()):
object_count += generate_object_hierarchy(node.GetChild(i), object_children)
if node.GetChildCount() > 0:
# Having 'children' above other attributes is hard to read.
# We can send it to the bottom using the last letter of the alphabet 'z'.
# This letter is removed from the final output.
if option_pretty_print:
object_data['zchildren'] = object_children
else:
object_data['children'] = object_children
object_dict[object_name] = object_data
return object_count
def generate_scene_objects(scene):
object_count = 0
object_dict = {}
ambient_light = generate_ambient_light(scene)
if ambient_light:
object_dict['AmbientLight'] = ambient_light
object_count += 1
if option_default_light:
default_light = generate_default_light()
object_dict['DefaultLight'] = default_light
object_count += 1
if option_default_camera:
default_camera = generate_default_camera()
object_dict['DefaultCamera'] = default_camera
object_count += 1
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
object_count += generate_object_hierarchy(node.GetChild(i), object_dict)
return object_dict, object_count
# #####################################################
# Generate Scene Output
# #####################################################
def extract_scene(scene, filename):
global_settings = scene.GetGlobalSettings()
objects, nobjects = generate_scene_objects(scene)
textures = generate_texture_dict(scene)
materials = generate_material_dict(scene)
geometries = generate_geometry_dict(scene)
embeds = generate_embed_dict(scene)
ntextures = len(textures)
nmaterials = len(materials)
ngeometries = len(geometries)
position = serializeVector3( (0,0,0) )
rotation = serializeVector3( (0,0,0) )
scale = serializeVector3( (1,1,1) )
camera_names = generate_camera_name_list(scene)
scene_settings = scene.GetGlobalSettings()
# This does not seem to be any help here
# global_settings.GetDefaultCamera()
defcamera = camera_names[0] if len(camera_names) > 0 else ""
if option_default_camera:
defcamera = 'default_camera'
metadata = {
'formatVersion': 3.2,
'type': 'scene',
'generatedBy': 'convert-to-threejs.py',
'objects': nobjects,
'geometries': ngeometries,
'materials': nmaterials,
'textures': ntextures
}
transform = {
'position' : position,
'rotation' : rotation,
'scale' : scale
}
defaults = {
'bgcolor' : 0,
'camera' : defcamera,
'fog' : ''
}
output = {
'objects': objects,
'geometries': geometries,
'materials': materials,
'textures': textures,
'embeds': embeds,
'transform': transform,
'defaults': defaults,
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
# #####################################################
# Generate Non-Scene Output
# #####################################################
def extract_geometry(scene, filename):
output = generate_non_scene_output(scene)
return output
# #####################################################
# File Helpers
# #####################################################
def write_file(filepath, content):
index = filepath.rfind('/')
dir = filepath[0:index]
if not os.path.exists(dir):
os.makedirs(dir)
out = open(filepath, "w")
out.write(content.encode('utf8', 'replace'))
out.close()
def read_file(filepath):
f = open(filepath)
content = f.readlines()
f.close()
return content
def copy_textures(textures):
texture_dict = {}
for key in textures:
url = textures[key]['url']
src = replace_OutFolder2inFolder(url)
if url in texture_dict: # texture has been copied
continue
if not os.path.exists(src):
print("copy_texture error: we can't find this texture at " + src)
continue
try:
index = url.rfind('/')
folder = url[0:index]
if len(folder) and not os.path.exists(folder):
os.makedirs(folder)
shutil.copyfile(src, url)
texture_dict[url] = True
except IOError as e:
print "I/O error({0}): {1} {2}".format(e.errno, e.strerror, src)
def findFilesWithExt(directory, ext, include_path = True):
ext = ext.lower()
found = []
for root, dirs, files in os.walk(directory):
for filename in files:
current_ext = os.path.splitext(filename)[1].lower()
if current_ext == ext:
if include_path:
found.append(os.path.join(root, filename))
else:
found.append(filename)
return found
# #####################################################
# main
# #####################################################
if __name__ == "__main__":
from optparse import OptionParser
try:
from FbxCommon import *
except ImportError:
import platform
msg = 'Could not locate the python FBX SDK!\n'
msg += 'You need to copy the FBX SDK into your python install folder such as '
if platform.system() == 'Windows' or platform.system() == 'Microsoft':
msg += '"Python26/Lib/site-packages"'
elif platform.system() == 'Linux':
msg += '"/usr/local/lib/python2.6/site-packages"'
elif platform.system() == 'Darwin':
msg += '"/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages"'
msg += ' folder.'
print(msg)
sys.exit(1)
usage = "Usage: %prog [source_file.fbx] [output_file.js] [options]"
parser = OptionParser(usage=usage)
parser.add_option('-t', '--triangulate', action='store_true', dest='triangulate', help="force quad geometry into triangles", default=False)
parser.add_option('-x', '--ignore-textures', action='store_true', dest='notextures', help="don't include texture references in output file", default=False)
parser.add_option('-u', '--force-prefix', action='store_true', dest='prefix', help="prefix all object names in output file to ensure uniqueness", default=False)
parser.add_option('-f', '--flatten-scene', action='store_true', dest='geometry', help="merge all geometries and apply node transforms", default=False)
parser.add_option('-c', '--add-camera', action='store_true', dest='defcamera', help="include default camera in output scene", default=False)
parser.add_option('-l', '--add-light', action='store_true', dest='deflight', help="include default light in output scene", default=False)
parser.add_option('-p', '--pretty-print', action='store_true', dest='pretty', help="pretty print the output JSON file", default=False)
(options, args) = parser.parse_args()
option_triangulate = options.triangulate
option_textures = True if not options.notextures else False
option_prefix = options.prefix
option_geometry = options.geometry
option_default_camera = options.defcamera
option_default_light = options.deflight
option_pretty_print = options.pretty
# Prepare the FBX SDK.
sdk_manager, scene = InitializeSdkObjects()
converter = FbxGeometryConverter(sdk_manager)
# The converter takes an FBX file as an argument.
if len(args) > 1:
print("\nLoading file: %s" % args[0])
result = LoadScene(sdk_manager, scene, args[0])
else:
result = False
print("\nUsage: convert_fbx_to_threejs [source_file.fbx] [output_file.js]\n")
if not result:
print("\nAn error occurred while loading the file...")
else:
if option_triangulate:
print("\nForcing geometry to triangles")
triangulate_scene(scene)
# According to asset's coordinate to convert scene
upVector = scene.GetGlobalSettings().GetAxisSystem().GetUpVector()
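# GetUpVector() reports the asset's up axis (3 corresponds to the Z axis in the
# FBX enum); Z-up assets are converted with a Z-up Maya axis system, all others with Y-up.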
axis_system = FbxAxisSystem.MayaYUp
if upVector[0] == 3:
axis_system = FbxAxisSystem.MayaZUp
axis_system.ConvertScene(scene)
inputFolder = args[0].replace( "\\", "/" )
index = inputFolder.rfind( "/" )
inputFolder = inputFolder[:index]
outputFolder = args[1].replace( "\\", "/" )
index = outputFolder.rfind( "/" )
outputFolder = outputFolder[:index]
if option_geometry:
output_content = extract_geometry(scene, os.path.basename(args[0]))
else:
output_content = extract_scene(scene, os.path.basename(args[0]))
if option_pretty_print:
output_string = json.dumps(output_content, indent=4, cls=CustomEncoder, separators=(',', ': '), sort_keys=True)
output_string = executeRegexHacks(output_string)
else:
output_string = json.dumps(output_content, separators=(',', ': '), sort_keys=True)
output_path = os.path.join(os.getcwd(), args[1])
write_file(output_path, output_string)
copy_textures( output_content['textures'] )
print("\nExported Three.js file to:\n%s\n" % output_path)
# Destroy all objects created by the FBX SDK.
sdk_manager.Destroy()
sys.exit(0)
| three.js-master | utils/converters/fbx/convert_to_threejs.py |
"""Join multiple binary files into single file and generate JSON snippet with offsets
-------------------------------------
How to use
-------------------------------------
python join_ctm.py -i "part_*.ctm" -o joined.ctm [-j offsets.js]
Will read multiple files following wildcard pattern (ordered lexicographically):
part_000.ctm
part_001.ctm
part_002.ctm
...
part_XXX.ctm
And generate single concatenated files:
joined.ctm
offsets.js (optional, offsets are also dumped to standard output)
"""
from __future__ import print_function
import getopt
import glob
import sys
import os
# #####################################################
# Templates
# #####################################################
TEMPLATE_JSON = u"""\
"offsets": [ %(offsets)s ],
"""
# #############################################################################
# Helpers
# #############################################################################
def usage():
print('Usage: %s -i "filename_*.ctm" -o filename.ctm [-j offsets.js]' % os.path.basename(sys.argv[0]))
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:o:j:", ["help", "input=", "output=", "json="])
except getopt.GetoptError:
usage()
sys.exit(2)
inpattern = ""
outname = ""
jsonname = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
inpattern = a
elif o in ("-o", "--output"):
outname = a
elif o in ("-j", "--json"):
jsonname = a
# quit if required parameters are missing
if inpattern == "" or outname == "":
usage()
sys.exit(2)
outfile = open(outname, "wb")
matches = glob.glob(inpattern)
matches.sort()
total = 0
offsets = []
for filename in matches:
filesize = os.path.getsize(filename)
offsets.append(total)
total += filesize
print(filename, filesize)
infile = open(filename, "rb")
buffer = infile.read()
outfile.write(buffer)
infile.close()
outfile.close()
json_str = TEMPLATE_JSON % {
"offsets" : ", ".join(["%d" % o for o in offsets])
}
print(json_str)
if jsonname:
jsonfile = open(jsonname, "w")
jsonfile.write(json_str)
jsonfile.close() | three.js-master | utils/converters/ctm/join_ctm.py |
#!/usr/bin/env python
import sys
if sys.version_info < (2, 7):
print("This script requires at least Python 2.7.")
print("Please, update to a newer version: http://www.python.org/download/releases/")
# exit()
import argparse
import json
import os
import shutil
import tempfile
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('--include', action='append', required=True)
parser.add_argument('--externs', action='append', default=['externs/common.js'])
parser.add_argument('--amd', action='store_true', default=False)
parser.add_argument('--minify', action='store_true', default=False)
parser.add_argument('--output', default='../../build/three.js')
parser.add_argument('--sourcemaps', action='store_true', default=False)
args = parser.parse_args()
output = args.output
# merge
print(' * Building ' + output)
# enable sourcemaps support
if args.sourcemaps:
sourcemap = output + '.map'
sourcemapping = '\n//@ sourceMappingURL=' + sourcemap
sourcemapargs = ' --create_source_map ' + sourcemap + ' --source_map_format=V3'
else:
sourcemap = sourcemapping = sourcemapargs = ''
fd, path = tempfile.mkstemp()
tmp = open(path, 'w')
sources = []
if args.amd:
tmp.write('( function ( root, factory ) {\n\n\tif ( typeof define === \'function\' && define.amd ) {\n\n\t\tdefine( [ \'exports\' ], factory );\n\n\t} else if ( typeof exports === \'object\' ) {\n\n\t\tfactory( exports );\n\n\t} else {\n\n\t\tfactory( root );\n\n\t}\n\n}( this, function ( exports ) {\n\n')
for include in args.include:
with open('includes/' + include + '.json','r') as f:
files = json.load(f)
for filename in files:
tmp.write('// File:' + filename)
tmp.write('\n\n')
filename = '../../' + filename
sources.append(filename)
with open(filename, 'r') as f:
if filename.endswith(".glsl"):
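# GLSL sources are inlined as THREE.ShaderChunk entries, with newlines escaped
# so each chunk fits on a single JavaScript line.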
tmp.write('THREE.ShaderChunk[ \'' + os.path.splitext(os.path.basename(filename))[0] + '\'] = "')
tmp.write(f.read().replace('\n','\\n'))
tmp.write('";\n\n')
else:
tmp.write(f.read())
tmp.write('\n')
if args.amd:
tmp.write('exports.THREE = THREE;\n\n} ) );')
tmp.close()
# save
if args.minify is False:
shutil.copy(path, output)
os.chmod(output, 0o664) # temp files would usually get 0600
else:
backup = ''
if os.path.exists(output):
with open(output,'r') as f: backup = f.read()
os.remove(output)
externs = ' --externs '.join(args.externs)
source = ' '.join(sources)
cmd = 'java -jar compiler/compiler.jar --warning_level=VERBOSE --jscomp_off=globalThis --externs %s --jscomp_off=checkTypes --language_in=ECMASCRIPT5_STRICT --js %s --js_output_file %s %s' % (externs, path, output, sourcemapargs)
os.system(cmd)
# header
if os.path.exists(output):
with open(output,'r') as f: text = f.read()
with open(output,'w') as f: f.write('// threejs.org/license\n' + text + sourcemapping)
else:
print("Minification with Closure compiler failed. Check your Java runtime version.")
with open(output,'w') as f: f.write(backup)
os.close(fd)
os.remove(path)
if __name__ == "__main__":
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
main()
| three.js-master | utils/build/build.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import numpy as np
def save_figure_to_numpy(fig):
# save it to a numpy array.
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
def plot_alignment_to_numpy(alignment, title='', info=None, phoneme_seq=None,
vmin=None, vmax=None):
if phoneme_seq:
fig, ax = plt.subplots(figsize=(15, 10))
else:
fig, ax = plt.subplots(figsize=(6, 4))
im = ax.imshow(alignment, aspect='auto', origin='lower',
interpolation='none', vmin=vmin, vmax=vmax)
ax.set_title(title)
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
if phoneme_seq is not None:
# for debugging phonemes and durations in alignment maps. Not used by default in training code
ax.set_yticks(np.arange(len(phoneme_seq)))
ax.set_yticklabels(phoneme_seq)
ax.hlines(np.arange(len(phoneme_seq)), xmin=0.0, xmax=max(ax.get_xticks()))
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
fig, ax = plt.subplots(figsize=(12, 3))
ax.scatter(range(len(gate_targets)), gate_targets, alpha=0.5,
color='green', marker='+', s=1, label='target')
ax.scatter(range(len(gate_outputs)), gate_outputs, alpha=0.5,
color='red', marker='.', s=1, label='predicted')
plt.xlabel("Frames (Green target, Red predicted)")
plt.ylabel("Gate State")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
| radtts-main | plotting_utils.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import os
import argparse
import json
import numpy as np
from scipy.io.wavfile import write
import torch
from torch.cuda import amp
from radtts import RADTTS
from torch.utils.data import DataLoader
from data import Data, DataCollate
from train import update_params, parse_data_from_batch
from hifigan_models import Generator
from hifigan_env import AttrDict
from hifigan_denoiser import Denoiser
from tqdm import tqdm
def is_feature_invalid(x, max_val):
return (torch.isnan(x).any().item() or
x.sum() == 0 or
(x.max().item() > max_val))
def get_configs(config_path, params):
# Parse configs. Globals nicer in this case
with open(config_path) as f:
data = f.read()
config = json.loads(data)
update_params(config, params)
data_config = config["data_config"]
model_config = config["model_config"]
return model_config, data_config
def load_vocoder(vocoder_path, config_path, to_cuda=True):
with open(config_path) as f:
data_vocoder = f.read()
config_vocoder = json.loads(data_vocoder)
h = AttrDict(config_vocoder)
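# Checkpoints whose filename contains 'blur' get Gaussian-blur denoising enabled
# (p_blurring = 0.5); all other checkpoints have it explicitly disabled.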
if 'blur' in vocoder_path:
config_vocoder['gaussian_blur']['p_blurring'] = 0.5
else:
if 'gaussian_blur' in config_vocoder:
config_vocoder['gaussian_blur']['p_blurring'] = 0.0
else:
config_vocoder['gaussian_blur'] = {'p_blurring': 0.0}
h['gaussian_blur'] = {'p_blurring': 0.0}
state_dict_g = torch.load(vocoder_path, map_location='cpu')['generator']
# load hifigan
vocoder = Generator(h)
vocoder.load_state_dict(state_dict_g)
denoiser = Denoiser(vocoder)
if to_cuda:
vocoder.cuda()
denoiser.cuda()
vocoder.eval()
denoiser.eval()
return vocoder, denoiser
def infer(radtts_path, radtts_config_path, vocoder_path,
vocoder_config_path, n_samples, sigma, use_amp, seed, output_dir,
denoising_strength, params, shuffle, takes, save_mels, no_audio,
predict_features, sigma_f0=1.0, sigma_energy=0.8,
save_features=False, plot_features=False, f0_mean=0.0, f0_std=0.0,
energy_mean=0.0, energy_std=0.0, filter_invalid=False):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
ignore_keys = ['training_files', 'validation_files']
vocoder, denoiser = load_vocoder(vocoder_path, vocoder_config_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
model_config, data_config = get_configs(radtts_config_path, params)
radtts = RADTTS(**model_config)
radtts.enable_inverse_cache() # cache inverse matrix for 1x1 invertible convs
print("Loading checkpoint '{}'" .format(radtts_path))
checkpoint_dict = torch.load(radtts_path, map_location='cpu')
state_dict = checkpoint_dict['state_dict']
radtts.load_state_dict(state_dict)
radtts.remove_norms()
radtts.eval()
radtts.cuda()
print("Loaded checkpoint '{}'" .format(radtts_path))
trainset = Data(
data_config['training_files'],
**dict((k, v) for k, v in data_config.items() if k not in ignore_keys))
data_config['aug_probabilities'] = None
data_config['dur_max'] = 60
valset = Data(data_config['validation_files'],
**dict((k, v) for k, v in data_config.items()
if k not in ignore_keys),
speaker_ids=trainset.speaker_ids)
collate_fn = DataCollate()
dataloader = DataLoader(valset, num_workers=1, shuffle=shuffle,
sampler=None, batch_size=1,
pin_memory=False, drop_last=False,
collate_fn=collate_fn)
f0_max = trainset.f0_max
energy_max = 1.0
for k, batch in enumerate(dataloader):
(mel, speaker_ids, text, in_lens, out_lens, attn_prior,
f0, voiced_mask, p_voiced, energy_avg,
audiopaths) = parse_data_from_batch(batch)
filename = os.path.splitext(
os.path.basename(batch['audiopaths'][0]))[0]
f0_gt, energy_avg_gt = f0.clone(), energy_avg.clone()
suffix_path = "sid{}_sigma{}".format(speaker_ids.item(), sigma)
print("sample", k, filename)
with amp.autocast(use_amp):
# extract duration from attention using ground truth mel
outputs = radtts(
mel, speaker_ids, text, in_lens, out_lens, True,
attn_prior=attn_prior, f0=f0, energy_avg=energy_avg,
voiced_mask=voiced_mask, p_voiced=p_voiced)
dur_target = outputs['attn'][0, 0].sum(0, keepdim=True)
dur_target = (dur_target + 0.5).floor().int()
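# adding 0.5 before floor() rounds each token duration to the nearest whole frame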
with amp.autocast(use_amp):
for j in tqdm(range(takes)):
audio_path = "{}/{}_{}_{}_denoised.wav".format(
output_dir, filename, j, suffix_path)
if os.path.exists(audio_path):
print("skipping", audio_path)
continue
if predict_features:
f0_is_invalid, energy_is_invalid = True, True
while f0_is_invalid or energy_is_invalid:
model_output = radtts.infer(
speaker_ids, text, sigma, None, sigma_f0,
sigma_energy, dur=dur_target)
f0 = model_output['f0']
energy_avg = model_output['energy_avg']
if filter_invalid:
f0_is_invalid = is_feature_invalid(f0, f0_max)
energy_is_invalid = is_feature_invalid(
energy_avg, energy_max)
else:
f0_is_invalid, energy_is_invalid = False, False
else:
model_output = radtts.infer(
speaker_ids, text, sigma, dur=dur_target, f0=f0,
energy_avg=energy_avg, voiced_mask=voiced_mask,
f0_mean=f0_mean, f0_std=f0_std,
energy_mean=energy_mean, energy_std=energy_std)
mel = model_output['mel']
if save_mels:
np.save("{}/{}_{}_{}_mel".format(
output_dir, filename, j, suffix_path),
mel.cpu().numpy())
if not no_audio:
audio = vocoder(mel).float()[0]
audio_denoised = denoiser(
audio, strength=denoising_strength)[0].float()
audio = audio[0].cpu().numpy()
audio_denoised = audio_denoised[0].cpu().numpy()
write("{}/{}_{}_{}.wav".format(
output_dir, filename, j, suffix_path),
data_config['sampling_rate'], audio_denoised)
if plot_features:
fig, axes = plt.subplots(2, 1, figsize=(8, 3))
axes[0].plot(f0_gt[0].cpu(), label='gt')
axes[0].plot(f0[0].cpu(), label='pred')
axes[1].plot(energy_avg_gt[0].cpu(), label='gt')
axes[1].plot(energy_avg[0].cpu(), label='pred')
plt.savefig("{}/{}_{}_{}.png".format(
output_dir, filename, j, suffix_path))
plt.close("all")
if save_features:
mask = f0 < data_config['f0_min']
f0[mask] = 0.0
np.save("{}/{}_{}_{}_f0".format(
output_dir, filename, j, suffix_path),
f0.cpu().numpy())
np.save("{}/{}_{}_{}_energy".format(
output_dir, filename, j, suffix_path),
energy_avg.cpu().numpy())
if k + 1 == n_samples:
break
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--radtts_path', type=str)
parser.add_argument('-c', '--radtts_config_path', type=str, help='vocoder JSON file config')
parser.add_argument('-v', '--vocoder_path', type=str)
parser.add_argument('-k', '--vocoder_config_path', type=str, help='vocoder JSON file config')
parser.add_argument('-p', '--params', nargs='+', default=[])
parser.add_argument('-n', '--n_samples', default=5, type=int)
parser.add_argument("-s", "--sigma", default=0.8, type=float)
parser.add_argument("--sigma_f0", default=1.0, type=float)
parser.add_argument("--sigma_energy", default=1.0, type=float)
parser.add_argument("--f0_mean", default=0.0, type=float)
parser.add_argument("--f0_std", default=0.0, type=float)
parser.add_argument("--energy_mean", default=0.0, type=float)
parser.add_argument("--energy_std", default=0.0, type=float)
parser.add_argument("--seed", default=1234, type=int)
parser.add_argument("--use_amp", action="store_true")
parser.add_argument("-o", '--output_dir', type=str)
parser.add_argument("-d", "--denoising_strength", default=0.01, type=float)
parser.add_argument("--shuffle", action="store_true")
parser.add_argument("--save_mels", action="store_true")
parser.add_argument("--no_audio", action="store_true")
parser.add_argument("--predict_features", action="store_true")
parser.add_argument("--save_features", action="store_true")
parser.add_argument("--plot_features", action="store_true")
parser.add_argument("--filter_invalid", action="store_true")
parser.add_argument('-t', '--takes', default=1, type=int)
args = parser.parse_args()
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
with torch.no_grad():
infer(args.radtts_path, args.radtts_config_path, args.vocoder_path,
args.vocoder_config_path, args.n_samples, args.sigma,
args.use_amp, args.seed, args.output_dir,
args.denoising_strength, args.params, args.shuffle, args.takes,
args.save_mels, args.no_audio, args.predict_features,
args.sigma_f0, args.sigma_energy, args.save_features,
args.plot_features, args.f0_mean, args.f0_std, args.energy_mean,
args.energy_std, args.filter_invalid)
| radtts-main | inference_voice_conversion.py |
# original source takes from https://github.com/jik876/hifi-gan/
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
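# A dict whose keys are also readable as attributes, e.g. (hypothetical config):
#   h = AttrDict({'sampling_rate': 22050})
#   h.sampling_rate  # -> 22050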
| radtts-main | hifigan_env.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import torch
from torch import nn
from common import Encoder, LengthRegulator, ConvAttention
from common import Invertible1x1ConvLUS, Invertible1x1Conv
from common import AffineTransformationLayer, LinearNorm, ExponentialClass
from common import get_mask_from_lengths
from attribute_prediction_model import get_attribute_prediction_model
from alignment import mas_width1 as mas
class FlowStep(nn.Module):
def __init__(self, n_mel_channels, n_context_dim, n_layers,
affine_model='simple_conv', scaling_fn='exp',
matrix_decomposition='', affine_activation='softplus',
use_partial_padding=False, cache_inverse=False):
super(FlowStep, self).__init__()
if matrix_decomposition == 'LUS':
self.invtbl_conv = Invertible1x1ConvLUS(n_mel_channels, cache_inverse=cache_inverse)
else:
self.invtbl_conv = Invertible1x1Conv(n_mel_channels, cache_inverse=cache_inverse)
self.affine_tfn = AffineTransformationLayer(
n_mel_channels, n_context_dim, n_layers, affine_model=affine_model,
scaling_fn=scaling_fn, affine_activation=affine_activation,
use_partial_padding=use_partial_padding)
def enable_inverse_cache(self):
self.invtbl_conv.cache_inverse=True
def forward(self, z, context, inverse=False, seq_lens=None):
if inverse: # for inference z-> mel
z = self.affine_tfn(z, context, inverse, seq_lens=seq_lens)
z = self.invtbl_conv(z, inverse)
return z
else: # training mel->z
z, log_det_W = self.invtbl_conv(z)
z, log_s = self.affine_tfn(z, context, seq_lens=seq_lens)
return z, log_det_W, log_s
class RADTTS(torch.nn.Module):
def __init__(self, n_speakers, n_speaker_dim, n_text, n_text_dim, n_flows,
n_conv_layers_per_step, n_mel_channels, n_hidden,
mel_encoder_n_hidden, dummy_speaker_embedding, n_early_size,
n_early_every, n_group_size, affine_model, dur_model_config,
f0_model_config, energy_model_config, v_model_config=None,
include_modules='dec', scaling_fn='exp',
matrix_decomposition='', learn_alignments=False,
affine_activation='softplus', attn_use_CTC=True,
use_speaker_emb_for_alignment=False, use_context_lstm=False,
context_lstm_norm=None, text_encoder_lstm_norm=None,
n_f0_dims=0, n_energy_avg_dims=0,
context_lstm_w_f0_and_energy=True,
use_first_order_features=False, unvoiced_bias_activation='',
ap_pred_log_f0=False, **kwargs):
super(RADTTS, self).__init__()
assert(n_early_size % 2 == 0)
self.do_mel_descaling = kwargs.get('do_mel_descaling', True)
self.n_mel_channels = n_mel_channels
self.n_f0_dims = n_f0_dims # >= 1 to train with f0
self.n_energy_avg_dims = n_energy_avg_dims # >= 1 to train with energy
self.decoder_use_partial_padding = kwargs.get(
'decoder_use_partial_padding', True)
self.n_speaker_dim = n_speaker_dim
assert(self.n_speaker_dim % 2 == 0)
self.speaker_embedding = torch.nn.Embedding(
n_speakers, self.n_speaker_dim)
self.embedding = torch.nn.Embedding(n_text, n_text_dim)
self.flows = torch.nn.ModuleList()
self.encoder = Encoder(encoder_embedding_dim=n_text_dim,
norm_fn=nn.InstanceNorm1d,
lstm_norm_fn=text_encoder_lstm_norm)
self.dummy_speaker_embedding = dummy_speaker_embedding
self.learn_alignments = learn_alignments
self.affine_activation = affine_activation
self.include_modules = include_modules
self.attn_use_CTC = bool(attn_use_CTC)
self.use_speaker_emb_for_alignment = use_speaker_emb_for_alignment
self.use_context_lstm = bool(use_context_lstm)
self.context_lstm_norm = context_lstm_norm
self.context_lstm_w_f0_and_energy = context_lstm_w_f0_and_energy
self.length_regulator = LengthRegulator()
self.use_first_order_features = bool(use_first_order_features)
self.decoder_use_unvoiced_bias = kwargs.get(
'decoder_use_unvoiced_bias', True)
self.ap_pred_log_f0 = ap_pred_log_f0
self.ap_use_unvoiced_bias = kwargs.get('ap_use_unvoiced_bias', True)
self.attn_straight_through_estimator = kwargs.get(
'attn_straight_through_estimator', False)
if 'atn' in include_modules or 'dec' in include_modules:
if self.learn_alignments:
if self.use_speaker_emb_for_alignment:
self.attention = ConvAttention(
n_mel_channels, n_text_dim + self.n_speaker_dim)
else:
self.attention = ConvAttention(n_mel_channels, n_text_dim)
self.n_flows = n_flows
self.n_group_size = n_group_size
n_flowstep_cond_dims = (
self.n_speaker_dim +
(n_text_dim + n_f0_dims + n_energy_avg_dims) * n_group_size)
if self.use_context_lstm:
n_in_context_lstm = (
self.n_speaker_dim + n_text_dim * n_group_size)
n_context_lstm_hidden = int(
(self.n_speaker_dim + n_text_dim * n_group_size) / 2)
if self.context_lstm_w_f0_and_energy:
n_in_context_lstm = (
n_f0_dims + n_energy_avg_dims + n_text_dim)
n_in_context_lstm *= n_group_size
n_in_context_lstm += self.n_speaker_dim
n_context_hidden = (
n_f0_dims + n_energy_avg_dims + n_text_dim)
n_context_hidden = n_context_hidden * n_group_size / 2
n_context_hidden = self.n_speaker_dim + n_context_hidden
n_context_hidden = int(n_context_hidden)
n_flowstep_cond_dims = (
self.n_speaker_dim + n_text_dim * n_group_size)
self.context_lstm = torch.nn.LSTM(
input_size=n_in_context_lstm,
hidden_size=n_context_lstm_hidden, num_layers=1,
batch_first=True, bidirectional=True)
if context_lstm_norm is not None:
if 'spectral' in context_lstm_norm:
print("Applying spectral norm to context encoder LSTM")
lstm_norm_fn_pntr = torch.nn.utils.spectral_norm
elif 'weight' in context_lstm_norm:
print("Applying weight norm to context encoder LSTM")
lstm_norm_fn_pntr = torch.nn.utils.weight_norm
self.context_lstm = lstm_norm_fn_pntr(
self.context_lstm, 'weight_hh_l0')
self.context_lstm = lstm_norm_fn_pntr(
self.context_lstm, 'weight_hh_l0_reverse')
if self.n_group_size > 1:
self.unfold_params = {'kernel_size': (n_group_size, 1),
'stride': n_group_size,
'padding': 0, 'dilation': 1}
self.unfold = nn.Unfold(**self.unfold_params)
self.exit_steps = []
self.n_early_size = n_early_size
n_mel_channels = n_mel_channels*n_group_size
for i in range(self.n_flows):
if i > 0 and i % n_early_every == 0: # early exitting
n_mel_channels -= self.n_early_size
self.exit_steps.append(i)
self.flows.append(FlowStep(
n_mel_channels, n_flowstep_cond_dims,
n_conv_layers_per_step, affine_model, scaling_fn,
matrix_decomposition, affine_activation=affine_activation,
use_partial_padding=self.decoder_use_partial_padding))
if 'dpm' in include_modules:
dur_model_config['hparams']['n_speaker_dim'] = n_speaker_dim
self.dur_pred_layer = get_attribute_prediction_model(
dur_model_config)
self.use_unvoiced_bias = False
self.use_vpred_module = False
self.ap_use_voiced_embeddings = kwargs.get(
'ap_use_voiced_embeddings', True)
if self.decoder_use_unvoiced_bias or self.ap_use_unvoiced_bias:
assert (unvoiced_bias_activation in {'relu', 'exp'})
self.use_unvoiced_bias = True
if unvoiced_bias_activation == 'relu':
unvbias_nonlin = nn.ReLU()
elif unvoiced_bias_activation == 'exp':
unvbias_nonlin = ExponentialClass()
else:
exit(1) # we won't reach here anyway due to the assertion
self.unvoiced_bias_module = nn.Sequential(
LinearNorm(n_text_dim, 1), unvbias_nonlin)
# all situations in which the vpred module is necessary
if self.ap_use_voiced_embeddings or self.use_unvoiced_bias or 'vpred' in include_modules:
self.use_vpred_module = True
if self.use_vpred_module:
v_model_config['hparams']['n_speaker_dim'] = n_speaker_dim
self.v_pred_module = get_attribute_prediction_model(v_model_config)
# 4 embeddings, first two are scales, second two are biases
if self.ap_use_voiced_embeddings:
self.v_embeddings = torch.nn.Embedding(4, n_text_dim)
if 'apm' in include_modules:
f0_model_config['hparams']['n_speaker_dim'] = n_speaker_dim
energy_model_config['hparams']['n_speaker_dim'] = n_speaker_dim
if self.use_first_order_features:
f0_model_config['hparams']['n_in_dim'] = 2
energy_model_config['hparams']['n_in_dim'] = 2
if 'spline_flow_params' in f0_model_config['hparams'] and f0_model_config['hparams']['spline_flow_params'] is not None:
f0_model_config['hparams']['spline_flow_params']['n_in_channels'] = 2
if 'spline_flow_params' in energy_model_config['hparams'] and energy_model_config['hparams']['spline_flow_params'] is not None:
energy_model_config['hparams']['spline_flow_params']['n_in_channels'] = 2
else:
if 'spline_flow_params' in f0_model_config['hparams'] and f0_model_config['hparams']['spline_flow_params'] is not None:
f0_model_config['hparams']['spline_flow_params']['n_in_channels'] = f0_model_config['hparams']['n_in_dim']
if 'spline_flow_params' in energy_model_config['hparams'] and energy_model_config['hparams']['spline_flow_params'] is not None:
energy_model_config['hparams']['spline_flow_params']['n_in_channels'] = energy_model_config['hparams']['n_in_dim']
self.f0_pred_module = get_attribute_prediction_model(
f0_model_config)
self.energy_pred_module = get_attribute_prediction_model(
energy_model_config)
def is_attribute_unconditional(self):
"""
returns true if the decoder is conditioned on neither energy nor F0
"""
return self.n_f0_dims == 0 and self.n_energy_avg_dims == 0
def encode_speaker(self, spk_ids):
spk_ids = spk_ids * 0 if self.dummy_speaker_embedding else spk_ids
spk_vecs = self.speaker_embedding(spk_ids)
return spk_vecs
def encode_text(self, text, in_lens):
# text_embeddings: b x len_text x n_text_dim
text_embeddings = self.embedding(text).transpose(1, 2)
# text_enc: b x n_text_dim x encoder_dim (512)
if in_lens is None:
text_enc = self.encoder.infer(text_embeddings).transpose(1, 2)
else:
text_enc = self.encoder(text_embeddings, in_lens).transpose(1, 2)
return text_enc, text_embeddings
def preprocess_context(self, context, speaker_vecs, out_lens=None, f0=None,
energy_avg=None):
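# Groups (unfolds) the frame-level conditioning by n_group_size so it lines up
# with the decoder's squeezed time resolution, appends the speaker vector, and
# optionally runs a bidirectional LSTM over the result.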
if self.n_group_size > 1:
# unfolding zero-padded values
context = self.unfold(context.unsqueeze(-1))
if f0 is not None:
f0 = self.unfold(f0[:, None, :, None])
if energy_avg is not None:
energy_avg = self.unfold(energy_avg[:, None, :, None])
speaker_vecs = speaker_vecs[..., None].expand(-1, -1, context.shape[2])
context_w_spkvec = torch.cat((context, speaker_vecs), 1)
if self.use_context_lstm:
if self.context_lstm_w_f0_and_energy:
if f0 is not None:
context_w_spkvec = torch.cat((context_w_spkvec, f0), 1)
if energy_avg is not None:
context_w_spkvec = torch.cat(
(context_w_spkvec, energy_avg), 1)
unfolded_out_lens = (out_lens // self.n_group_size).long().cpu()
unfolded_out_lens_packed = nn.utils.rnn.pack_padded_sequence(
context_w_spkvec.transpose(1, 2), unfolded_out_lens,
batch_first=True, enforce_sorted=False)
self.context_lstm.flatten_parameters()
context_lstm_packed_output, _ = self.context_lstm(
unfolded_out_lens_packed)
context_lstm_padded_output, _ = nn.utils.rnn.pad_packed_sequence(
context_lstm_packed_output, batch_first=True)
context_w_spkvec = context_lstm_padded_output.transpose(1, 2)
if not self.context_lstm_w_f0_and_energy:
if f0 is not None:
context_w_spkvec = torch.cat((context_w_spkvec, f0), 1)
if energy_avg is not None:
context_w_spkvec = torch.cat((context_w_spkvec, energy_avg), 1)
return context_w_spkvec
def enable_inverse_cache(self):
for flow_step in self.flows:
flow_step.enable_inverse_cache()
def fold(self, mel):
"""Inverse of the self.unfold(mel.unsqueeze(-1)) operation used for the
grouping or "squeeze" operation on input
Args:
mel: B x C x T tensor of temporal data
"""
mel = nn.functional.fold(
mel, output_size=(mel.shape[2]*self.n_group_size, 1),
**self.unfold_params).squeeze(-1)
return mel
def binarize_attention(self, attn, in_lens, out_lens):
"""For training purposes only. Binarizes attention with MAS. These will
no longer recieve a gradient
Args:
attn: B x 1 x max_mel_len x max_text_len
"""
b_size = attn.shape[0]
with torch.no_grad():
attn_cpu = attn.data.cpu().numpy()
attn_out = torch.zeros_like(attn)
for ind in range(b_size):
hard_attn = mas(attn_cpu[ind, 0, :out_lens[ind], :in_lens[ind]])
attn_out[ind, 0, :out_lens[ind], :in_lens[ind]] = torch.tensor(
hard_attn, device=attn.get_device())
return attn_out
def get_first_order_features(self, feats, out_lens, dilation=1):
"""
feats: b x max_length
out_lens: b-dim
"""
# add an extra column
feats_extended_R = torch.cat(
(feats, torch.zeros_like(feats[:, 0:dilation])), dim=1)
feats_extended_L = torch.cat(
(torch.zeros_like(feats[:, 0:dilation]), feats), dim=1)
dfeats_R = feats_extended_R[:, dilation:] - feats
dfeats_L = feats - feats_extended_L[:, 0:-dilation]
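# average of forward and backward differences, i.e. a central-difference
# estimate of the feature's first derivative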
return (dfeats_R + dfeats_L) * 0.5
def apply_voice_mask_to_text(self, text_enc, voiced_mask):
"""
text_enc: b x C x N
voiced_mask: b x N
"""
voiced_mask = voiced_mask.unsqueeze(1)
voiced_embedding_s = self.v_embeddings.weight[0:1, :, None]
unvoiced_embedding_s = self.v_embeddings.weight[1:2, :, None]
voiced_embedding_b = self.v_embeddings.weight[2:3, :, None]
unvoiced_embedding_b = self.v_embeddings.weight[3:4, :, None]
scale = torch.sigmoid(voiced_embedding_s*voiced_mask + unvoiced_embedding_s*(1-voiced_mask))
bias = 0.1*torch.tanh(voiced_embedding_b*voiced_mask + unvoiced_embedding_b*(1-voiced_mask))
return text_enc*scale+bias
def forward(self, mel, speaker_ids, text, in_lens, out_lens,
binarize_attention=False, attn_prior=None,
f0=None, energy_avg=None, voiced_mask=None, p_voiced=None):
speaker_vecs = self.encode_speaker(speaker_ids)
text_enc, text_embeddings = self.encode_text(text, in_lens)
log_s_list, log_det_W_list, z_mel = [], [], []
attn = None
attn_soft = None
attn_hard = None
if 'atn' in self.include_modules or 'dec' in self.include_modules:
# make sure to do the alignments before folding
attn_mask = get_mask_from_lengths(in_lens)[..., None] == 0
text_embeddings_for_attn = text_embeddings
if self.use_speaker_emb_for_alignment:
speaker_vecs_expd = speaker_vecs[:, :, None].expand(
-1, -1, text_embeddings.shape[2])
text_embeddings_for_attn = torch.cat(
(text_embeddings_for_attn, speaker_vecs_expd.detach()), 1)
# attn_mask should be 1 for unused time-steps in the text_enc_w_spkvec tensor
attn_soft, attn_logprob = self.attention(
mel, text_embeddings_for_attn, out_lens, attn_mask,
key_lens=in_lens, attn_prior=attn_prior)
if binarize_attention:
attn = self.binarize_attention(attn_soft, in_lens, out_lens)
attn_hard = attn
if self.attn_straight_through_estimator:
attn_hard = attn_soft + (attn_hard - attn_soft).detach()
else:
attn = attn_soft
context = torch.bmm(text_enc, attn.squeeze(1).transpose(1, 2))
f0_bias = 0
# unvoiced bias forward pass
if self.use_unvoiced_bias:
f0_bias = self.unvoiced_bias_module(context.permute(0, 2, 1))
f0_bias = -f0_bias[..., 0]
f0_bias = f0_bias * (~voiced_mask.bool()).float()
# mel decoder forward pass
if 'dec' in self.include_modules:
if self.n_group_size > 1:
# might truncate some frames at the end, but that's ok
# sometimes referred to as the "squeeze" operation
# invert this by calling self.fold(mel_or_z)
mel = self.unfold(mel.unsqueeze(-1))
z_out = []
# where context is folded
# mask f0 in case values are interpolated
if f0 is None:
f0_aug = None
else:
if self.decoder_use_unvoiced_bias:
f0_aug = f0 * voiced_mask + f0_bias
else:
f0_aug = f0 * voiced_mask
context_w_spkvec = self.preprocess_context(
context, speaker_vecs, out_lens, f0_aug,
energy_avg)
log_s_list, log_det_W_list, z_out = [], [], []
unfolded_seq_lens = out_lens//self.n_group_size
for i, flow_step in enumerate(self.flows):
if i in self.exit_steps:
z = mel[:, :self.n_early_size]
z_out.append(z)
mel = mel[:, self.n_early_size:]
mel, log_det_W, log_s = flow_step(
mel, context_w_spkvec, seq_lens=unfolded_seq_lens)
log_s_list.append(log_s)
log_det_W_list.append(log_det_W)
z_out.append(mel)
z_mel = torch.cat(z_out, 1)
# duration predictor forward pass
duration_model_outputs = None
if 'dpm' in self.include_modules:
if attn_hard is None:
attn_hard = self.binarize_attention(
attn_soft, in_lens, out_lens)
# convert hard attention to durations
attn_hard_reduced = attn_hard.sum(2)[:, 0, :]
duration_model_outputs = self.dur_pred_layer(
torch.detach(text_enc),
torch.detach(speaker_vecs),
torch.detach(attn_hard_reduced.float()), in_lens)
# f0, energy, vpred predictors forward pass
f0_model_outputs = None
energy_model_outputs = None
vpred_model_outputs = None
if 'apm' in self.include_modules:
if attn_hard is None:
attn_hard = self.binarize_attention(
attn_soft, in_lens, out_lens)
# convert hard attention to durations
if binarize_attention:
text_enc_time_expanded = context.clone()
else:
text_enc_time_expanded = torch.bmm(
text_enc, attn_hard.squeeze(1).transpose(1, 2))
if self.use_vpred_module:
# unvoiced bias requires voiced mask prediction
vpred_model_outputs = self.v_pred_module(
torch.detach(text_enc_time_expanded),
torch.detach(speaker_vecs),
torch.detach(voiced_mask), out_lens)
# affine transform context using voiced mask
if self.ap_use_voiced_embeddings:
text_enc_time_expanded = self.apply_voice_mask_to_text(
text_enc_time_expanded, voiced_mask)
# whether to use the unvoiced bias in the attribute predictor
# circumvent in-place modification
f0_target = f0.clone()
if self.ap_use_unvoiced_bias:
f0_target = torch.detach(f0_target * voiced_mask + f0_bias)
else:
f0_target = torch.detach(f0_target)
# fit to log f0 in f0 predictor
f0_target[voiced_mask.bool()] = torch.log(
f0_target[voiced_mask.bool()])
f0_target = f0_target / 6 # scale to ~ [0, 1] in log space
energy_avg = energy_avg * 2 - 1 # scale to ~ [-1, 1]
if self.use_first_order_features:
df0 = self.get_first_order_features(f0_target, out_lens)
denergy_avg = self.get_first_order_features(
energy_avg, out_lens)
f0_voiced = torch.cat(
(f0_target[:, None], df0[:, None]), dim=1)
energy_avg = torch.cat(
(energy_avg[:, None], denergy_avg[:, None]), dim=1)
f0_voiced = f0_voiced * 3 # scale to ~ 1 std
energy_avg = energy_avg * 3 # scale to ~ 1 std
else:
f0_voiced = f0_target * 2 # scale to ~ 1 std
energy_avg = energy_avg * 1.4 # scale to ~ 1 std
f0_model_outputs = self.f0_pred_module(
text_enc_time_expanded, torch.detach(speaker_vecs),
f0_voiced, out_lens)
energy_model_outputs = self.energy_pred_module(
text_enc_time_expanded, torch.detach(speaker_vecs),
energy_avg, out_lens)
outputs = {'z_mel': z_mel,
'log_det_W_list': log_det_W_list,
'log_s_list': log_s_list,
'duration_model_outputs': duration_model_outputs,
'f0_model_outputs': f0_model_outputs,
'energy_model_outputs': energy_model_outputs,
'vpred_model_outputs': vpred_model_outputs,
'attn_soft': attn_soft,
'attn': attn,
'text_embeddings': text_embeddings,
'attn_logprob': attn_logprob
}
return outputs
def infer(self, speaker_id, text, sigma, sigma_dur=0.8, sigma_f0=0.8,
sigma_energy=0.8, token_dur_scaling=1.0, token_duration_max=100,
speaker_id_text=None, speaker_id_attributes=None, dur=None,
f0=None, energy_avg=None, voiced_mask=None, f0_mean=0.0,
f0_std=0.0, energy_mean=0.0, energy_std=0.0):
batch_size = text.shape[0]
n_tokens = text.shape[1]
spk_vec = self.encode_speaker(speaker_id)
spk_vec_text, spk_vec_attributes = spk_vec, spk_vec
if speaker_id_text is not None:
spk_vec_text = self.encode_speaker(speaker_id_text)
if speaker_id_attributes is not None:
spk_vec_attributes = self.encode_speaker(speaker_id_attributes)
txt_enc, txt_emb = self.encode_text(text, None)
if dur is None:
# get token durations
z_dur = torch.cuda.FloatTensor(batch_size, 1, n_tokens)
z_dur = z_dur.normal_() * sigma_dur
dur = self.dur_pred_layer.infer(z_dur, txt_enc, spk_vec_text)
if dur.shape[-1] < txt_enc.shape[-1]:
to_pad = txt_enc.shape[-1] - dur.shape[2]
pad_fn = nn.ReplicationPad1d((0, to_pad))
dur = pad_fn(dur)
dur = dur[:, 0]
dur = dur.clamp(0, token_duration_max)
dur = dur * token_dur_scaling if token_dur_scaling > 0 else dur
dur = (dur + 0.5).floor().int()
out_lens = dur.sum(1).long().cpu() if dur.shape[0] != 1 else [dur.sum(1)]
max_n_frames = max(out_lens)
out_lens = torch.LongTensor(out_lens).to(txt_enc.device)
# get attributes (f0, energy, voicing, etc.)
txt_enc_time_expanded = self.length_regulator(
txt_enc.transpose(1, 2), dur).transpose(1, 2)
if not self.is_attribute_unconditional():
# if explicitly modeling attributes
if voiced_mask is None:
if self.use_vpred_module:
# get logits
voiced_mask = self.v_pred_module.infer(
None, txt_enc_time_expanded, spk_vec_attributes)
voiced_mask = (torch.sigmoid(voiced_mask[:, 0]) > 0.5)
voiced_mask = voiced_mask.float()
ap_txt_enc_time_expanded = txt_enc_time_expanded
# voice mask augmentation only used for attribute prediction
if self.ap_use_voiced_embeddings:
ap_txt_enc_time_expanded = self.apply_voice_mask_to_text(
txt_enc_time_expanded, voiced_mask)
f0_bias = 0
# unvoiced bias forward pass
if self.use_unvoiced_bias:
f0_bias = self.unvoiced_bias_module(
txt_enc_time_expanded.permute(0, 2, 1))
f0_bias = -f0_bias[..., 0]
f0_bias = f0_bias * (~voiced_mask.bool()).float()
if f0 is None:
n_f0_feature_channels = 2 if self.use_first_order_features else 1
z_f0 = torch.cuda.FloatTensor(
batch_size, n_f0_feature_channels, max_n_frames).normal_() * sigma_f0
f0 = self.infer_f0(
z_f0, ap_txt_enc_time_expanded, spk_vec_attributes,
voiced_mask, out_lens)[:, 0]
if f0_mean > 0.0:
vmask_bool = voiced_mask.bool()
f0_mu, f0_sigma = f0[vmask_bool].mean(), f0[vmask_bool].std()
f0[vmask_bool] = (f0[vmask_bool] - f0_mu) / f0_sigma
f0_std = f0_std if f0_std > 0 else f0_sigma
f0[vmask_bool] = f0[vmask_bool] * f0_std + f0_mean
if energy_avg is None:
n_energy_feature_channels = 2 if self.use_first_order_features else 1
z_energy_avg = torch.cuda.FloatTensor(
batch_size, n_energy_feature_channels, max_n_frames).normal_() * sigma_energy
energy_avg = self.infer_energy(
z_energy_avg, ap_txt_enc_time_expanded, spk_vec, out_lens)[:, 0]
# replication pad, because ungrouping with different group sizes
# may lead to mismatched lengths
if energy_avg.shape[1] < out_lens[0]:
to_pad = out_lens[0] - energy_avg.shape[1]
pad_fn = nn.ReplicationPad1d((0, to_pad))
f0 = pad_fn(f0[None])[0]
energy_avg = pad_fn(energy_avg[None])[0]
if f0.shape[1] < out_lens[0]:
to_pad = out_lens[0] - f0.shape[1]
pad_fn = nn.ReplicationPad1d((0, to_pad))
f0 = pad_fn(f0[None])[0]
if self.decoder_use_unvoiced_bias:
context_w_spkvec = self.preprocess_context(
txt_enc_time_expanded, spk_vec, out_lens,
f0 * voiced_mask + f0_bias, energy_avg)
else:
context_w_spkvec = self.preprocess_context(
txt_enc_time_expanded, spk_vec, out_lens, f0*voiced_mask,
energy_avg)
else:
context_w_spkvec = self.preprocess_context(
txt_enc_time_expanded, spk_vec, out_lens, None,
None)
residual = torch.cuda.FloatTensor(
batch_size, 80 * self.n_group_size, max_n_frames // self.n_group_size)
residual = residual.normal_() * sigma
# map from z sample to data
exit_steps_stack = self.exit_steps.copy()
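# Run the flow in reverse: start from the latent slice kept after the final
# early exit and prepend each early-exit chunk of the residual as its flow step
# is reached in the reversed loop below.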
mel = residual[:, len(exit_steps_stack) * self.n_early_size:]
remaining_residual = residual[:, :len(exit_steps_stack)*self.n_early_size]
unfolded_seq_lens = out_lens//self.n_group_size
for i, flow_step in enumerate(reversed(self.flows)):
curr_step = len(self.flows) - i - 1
mel = flow_step(mel, context_w_spkvec, inverse=True, seq_lens=unfolded_seq_lens)
if len(exit_steps_stack) > 0 and curr_step == exit_steps_stack[-1]:
# concatenate the next chunk of z
exit_steps_stack.pop()
residual_to_add = remaining_residual[
:, len(exit_steps_stack)*self.n_early_size:]
remaining_residual = remaining_residual[
:, :len(exit_steps_stack)*self.n_early_size]
mel = torch.cat((residual_to_add, mel), 1)
if self.n_group_size > 1:
mel = self.fold(mel)
if self.do_mel_descaling:
mel = mel * 2 - 5.5
return {'mel': mel,
'dur': dur,
'f0': f0,
'energy_avg': energy_avg,
'voiced_mask': voiced_mask
}
def infer_f0(self, residual, txt_enc_time_expanded, spk_vec,
voiced_mask=None, lens=None):
f0 = self.f0_pred_module.infer(
residual, txt_enc_time_expanded, spk_vec, lens)
if voiced_mask is not None and len(voiced_mask.shape) == 2:
voiced_mask = voiced_mask[:, None]
# constants
if self.ap_pred_log_f0:
if self.use_first_order_features:
f0 = f0[:,0:1,:] / 3
else:
f0 = f0 / 2
f0 = f0 * 6
else:
f0 = f0 / 6
f0 = f0 / 640
if voiced_mask is None:
voiced_mask = f0 > 0.0
else:
voiced_mask = voiced_mask.bool()
# due to grouping, f0 might be 1 frame short
voiced_mask = voiced_mask[:,:,:f0.shape[-1]]
if self.ap_pred_log_f0:
# if variable is set, decoder sees linear f0
# mask = f0 > 0.0 if voiced_mask is None else voiced_mask.bool()
f0[voiced_mask] = torch.exp(f0[voiced_mask])
f0[~voiced_mask] = 0.0
return f0
def infer_energy(self, residual, txt_enc_time_expanded, spk_vec, lens):
energy = self.energy_pred_module.infer(
residual, txt_enc_time_expanded, spk_vec, lens)
# magic constants
if self.use_first_order_features:
energy = energy / 3
else:
energy = energy / 1.4
energy = (energy + 1) / 2
return energy
def remove_norms(self):
"""Removes spectral and weightnorms from model. Call before inference
"""
for name, module in self.named_modules():
try:
nn.utils.remove_spectral_norm(module, name='weight_hh_l0')
print("Removed spectral norm from {}".format(name))
except:
pass
try:
nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')
print("Removed spectral norm from {}".format(name))
except:
pass
try:
nn.utils.remove_weight_norm(module)
print("Removed wnorm from {}".format(name))
except:
pass
| radtts-main | radtts.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import numpy as np
from matplotlib import pylab as plt
from numba import jit
def save_plot(fname, attn_map):
plt.imshow(attn_map)
plt.savefig(fname)
@jit(nopython=True)
def mas_width1(attn_map):
"""mas with hardcoded width=1"""
# assumes mel x text
opt = np.zeros_like(attn_map)
attn_map = np.log(attn_map)
attn_map[0, 1:] = -np.inf
log_p = np.zeros_like(attn_map)
log_p[0, :] = attn_map[0, :]
prev_ind = np.zeros_like(attn_map, dtype=np.int64)
for i in range(1, attn_map.shape[0]):
for j in range(attn_map.shape[1]): # for each text dim
prev_log = log_p[i-1, j]
prev_j = j
if j-1 >= 0 and log_p[i-1, j-1] >= log_p[i-1, j]:
prev_log = log_p[i-1, j-1]
prev_j = j-1
log_p[i, j] = attn_map[i, j] + prev_log
prev_ind[i, j] = prev_j
# now backtrack
curr_text_idx = attn_map.shape[1]-1
for i in range(attn_map.shape[0]-1, -1, -1):
opt[i, curr_text_idx] = 1
curr_text_idx = prev_ind[i, curr_text_idx]
opt[0, curr_text_idx] = 1
return opt
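# Illustrative usage (values are hypothetical, not from this repo): given a soft
# attention map of shape (n_mel_frames, n_text_tokens) with positive entries,
# mas_width1 returns a 0/1 matrix of the same shape marking the maximum-likelihood
# monotonic alignment path, e.g.
#   soft_attn = np.random.rand(200, 40) + 1e-3
#   hard_attn = mas_width1(soft_attn)   # same shape, binarized path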
if __name__ == '__main__':
attn_ = np.load(sys.argv[1])
attn = attn_.squeeze()
save_plot('orig.png', attn)
binarized = mas_width1(attn)
save_plot('binarized.png', binarized)
| radtts-main | alignment.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import torch
import numpy as np
from scipy.signal import get_window
from librosa.filters import mel as librosa_mel_fn
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time Fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
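# Example (illustrative values only): a 1024-sample Hann window hopped every 256
# samples over 100 frames yields an envelope of length
# n_fft + hop_length * (n_frames - 1) = 1024 + 256 * 99 = 26368 samples:
#   wss = window_sumsquare('hann', 100, hop_length=256, win_length=1024, n_fft=1024)
#   assert wss.shape == (26368,)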
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
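# Usage sketch (parameter values are illustrative): given magnitudes of shape
# (B, n_freq_bins, T) produced by stft_fn.transform(), the phase is re-estimated
# iteratively and a waveform is returned:
#   stft_fn = STFT(filter_length=1024, hop_length=256, win_length=1024)
#   audio = griffin_lim(magnitudes, stft_fn, n_iters=60)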
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
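# Note: for inputs above the clip value the two functions are inverses, e.g.
#   x = torch.rand(4) + 0.1
#   torch.allclose(dynamic_range_decompression(dynamic_range_compression(x)), x)  # True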
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=None):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
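# Usage sketch (constructor values are illustrative): compute an 80-bin mel
# spectrogram from a batch of audio in [-1, 1]:
#   taco_stft = TacotronSTFT(filter_length=1024, hop_length=256, win_length=1024)
#   mel = taco_stft.mel_spectrogram(audio)   # (B, 80, T_frames)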
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :])
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase,
Variable(self.inverse_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.to(magnitude.device)
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
| radtts-main | audio_processing.py |
# Original Source:
# https://github.com/ndeutschmann/zunis/blob/master/zunis_lib/zunis/models/flows/coupling_cells/piecewise_coupling/piecewise_linear.py
# https://github.com/ndeutschmann/zunis/blob/master/zunis_lib/zunis/models/flows/coupling_cells/piecewise_coupling/piecewise_quadratic.py
# Modifications made to jacobian computation by Yurong You and Kevin Shih
# Original License Text:
#########################################################################
# The MIT License (MIT)
# Copyright (c) 2020, nicolas deutschmann
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import torch.nn.functional as F
third_dimension_softmax = torch.nn.Softmax(dim=2)
class AvertedCUDARuntimeError(RuntimeError):
    """Raised when NaN or out-of-range bin indices are detected, instead of
    letting the bad index trigger a device-side assert that can lock the GPU."""
    pass
def piecewise_linear_transform(x, q_tilde, compute_jacobian=True,
outlier_passthru=True):
"""Apply an element-wise piecewise-linear transformation to some variables
Parameters
----------
x : torch.Tensor
a tensor with shape (N,k) where N is the batch dimension while k is the
dimension of the variable space. This variable spans the k-dimensional unit
hypercube
q_tilde: torch.Tensor
is a tensor with shape (N,k,b) where b is the number of bins.
This contains the un-normalized heights of the bins of the piecewise-constant PDF for dimension k,
i.e. q_tilde lives in all of R and we don't impose a constraint on their sum yet.
Normalization is imposed in this function using softmax.
compute_jacobian : bool, optional
determines whether the jacobian should be computed or None is returned
Returns
-------
tuple of torch.Tensor
pair `(y, j)`.
- `y` is a tensor with shape (N,k) living in the k-dimensional unit hypercube
- `j` is the log-jacobian of the transformation with shape (N,) if compute_jacobian==True, else None.
"""
logj = None
# TODO bottom-up assessment of handling the differentiability of variables
# Compute the bin width w
N, k, b = q_tilde.shape
Nx, kx = x.shape
assert N == Nx and k == kx, "Shape mismatch"
w = 1. / b
# Compute normalized bin heights with softmax function on bin dimension
q = 1. / w * third_dimension_softmax(q_tilde)
# x is in the mx-th bin: x \in [0,1],
# mx \in [[0,b-1]], so we clamp away the case x == 1
mx = torch.clamp(torch.floor(b * x), 0, b - 1).to(torch.long)
# Need special error handling because trying to index with mx
# if it contains nans will lock the GPU. (device-side assert triggered)
if torch.any(torch.isnan(mx)).item() or torch.any(mx < 0) or torch.any(mx >= b):
raise AvertedCUDARuntimeError("NaN detected in PWLinear bin indexing")
# We compute the output variable in-place
out = x - mx * w # alpha (element of [0.,w], the position of x in its bin
# Multiply by the slope
# q has shape (N,k,b), mxu = mx.unsqueeze(-1) has shape (N,k) with entries that are a b-index
# gather defines slope[i, j, k] = q[i, j, mxu[i, j, k]] with k taking only 0 as a value
# i.e. we say slope[i, j] = q[i, j, mx [i, j]]
slopes = torch.gather(q, 2, mx.unsqueeze(-1)).squeeze(-1)
out = out * slopes
# The jacobian is the product of the slopes in all dimensions
# Compute the integral over the left-bins.
# 1. Compute all integrals: cumulative sum of bin height * bin weight.
# We want that index i contains the cumsum *strictly to the left* so we shift by 1
# leaving the first entry null, which is achieved with a roll and assignment
q_left_integrals = torch.roll(torch.cumsum(q, 2) * w, 1, 2)
q_left_integrals[:, :, 0] = 0
# 2. Access the correct index to get the left integral of each point and add it to our transformation
out = out + torch.gather(q_left_integrals, 2, mx.unsqueeze(-1)).squeeze(-1)
# Regularization: points must be strictly within the unit hypercube
# Use the dtype information from pytorch
eps = torch.finfo(out.dtype).eps
out = out.clamp(
min=eps,
max=1. - eps
)
oob_mask = torch.logical_or(x < 0.0, x >1.0).detach().float()
if outlier_passthru:
out = out * (1-oob_mask) + x * oob_mask
slopes = slopes * (1-oob_mask) + oob_mask
if compute_jacobian:
#logj = torch.log(torch.prod(slopes.float(), 1))
logj = torch.sum(torch.log(slopes), 1)
del slopes
return out, logj
def piecewise_linear_inverse_transform(y, q_tilde, compute_jacobian=True,
outlier_passthru=True):
"""
Apply inverse of an element-wise piecewise-linear transformation to some
variables
Parameters
----------
y : torch.Tensor
a tensor with shape (N,k) where N is the batch dimension while k is the
dimension of the variable space. This variable spans the k-dimensional unit
hypercube
q_tilde: torch.Tensor
is a tensor with shape (N,k,b) where b is the number of bins.
This contains the un-normalized heights of the bins of the piecewise-constant PDF for dimension k,
i.e. q_tilde lives in all of R and we don't impose a constraint on their sum yet.
Normalization is imposed in this function using softmax.
compute_jacobian : bool, optional
determines whether the jacobian should be computed or None is returned
Returns
-------
tuple of torch.Tensor
pair `(x, j)`.
- `x` is a tensor with shape (N,k) living in the k-dimensional unit hypercube
- `j` is the log-jacobian of the transformation with shape (N,) if compute_jacobian==True, else None.
"""
# TODO bottom-up assessment of handling the differentiability of variables
# Compute the bin width w
N, k, b = q_tilde.shape
Ny, ky = y.shape
assert N == Ny and k == ky, "Shape mismatch"
w = 1. / b
# Compute normalized bin heights with softmax function on the bin dimension
q = 1. / w * third_dimension_softmax(q_tilde)
# Compute the integral over the left-bins in the forward transform.
# 1. Compute all integrals: cumulative sum of bin height * bin weight.
# We want that index i contains the cumsum *strictly to the left*,
# so we shift by 1 leaving the first entry null,
# which is achieved with a roll and assignment
q_left_integrals = torch.roll(torch.cumsum(q.float(), 2) * w, 1, 2)
q_left_integrals[:, :, 0] = 0
# Find which bin each y belongs to by finding the smallest bin such that
# y - q_left_integral is positive
edges = (y.unsqueeze(-1) - q_left_integrals).detach()
# y and q_left_integrals are between 0 and 1,
# so that their difference is at most 1.
# By setting the negative values to 2., we know that the
# smallest value left is the smallest positive
edges[edges < 0] = 2.
edges = torch.clamp(torch.argmin(edges, dim=2), 0, b - 1).to(torch.long)
# Need special error handling because trying to index with mx
# if it contains nans will lock the GPU. (device-side assert triggered)
if torch.any(torch.isnan(edges)).item() or torch.any(edges < 0) or torch.any(edges >= b):
raise AvertedCUDARuntimeError("NaN detected in PWLinear bin indexing")
# Gather the left integrals at each edge. See comment about gathering in q_left_integrals
# for the unsqueeze
q_left_integrals = q_left_integrals.gather(2, edges.unsqueeze(-1)).squeeze(-1)
# Gather the slope at each edge.
q = q.gather(2, edges.unsqueeze(-1)).squeeze(-1)
# Build the output
x = (y - q_left_integrals) / q + edges * w
# Regularization: points must be strictly within the unit hypercube
# Use the dtype information from pytorch
eps = torch.finfo(x.dtype).eps
x = x.clamp(
min=eps,
max=1. - eps
)
oob_mask = torch.logical_or(y < 0.0, y >1.0).detach().float()
if outlier_passthru:
x = x * (1-oob_mask) + y * oob_mask
q = q * (1-oob_mask) + oob_mask
# Prepare the jacobian
logj = None
if compute_jacobian:
#logj = - torch.log(torch.prod(q, 1))
logj = -torch.sum(torch.log(q.float()), 1)
return x.detach(), logj
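# Illustrative round trip (shapes are hypothetical): N=4 points in a k=2
# dimensional unit hypercube with b=8 bins per dimension,
#   x = torch.rand(4, 2)
#   q_tilde = torch.randn(4, 2, 8)
#   y, logj = piecewise_linear_transform(x, q_tilde)
#   x_rec, neg_logj = piecewise_linear_inverse_transform(y, q_tilde)
# x_rec approximately recovers x (up to the eps clamping) and neg_logj ~ -logj.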
def unbounded_piecewise_quadratic_transform(x, w_tilde, v_tilde, upper=1,
lower=0, inverse=False):
assert upper > lower
_range = upper - lower
inside_interval_mask = (x >= lower) & (x < upper)
outside_interval_mask = ~inside_interval_mask
outputs = torch.zeros_like(x)
log_j = torch.zeros_like(x)
outputs[outside_interval_mask] = x[outside_interval_mask]
log_j[outside_interval_mask] = 0
output, _log_j = piecewise_quadratic_transform(
(x[inside_interval_mask] - lower) / _range,
w_tilde[inside_interval_mask, :],
v_tilde[inside_interval_mask, :],
inverse=inverse)
outputs[inside_interval_mask] = output * _range + lower
if not inverse:
# the before and after transformation cancel out, so the log_j would be just as it is.
log_j[inside_interval_mask] = _log_j
else:
log_j = None
return outputs, log_j
def weighted_softmax(v, w):
# to avoid NaN...
v = v - torch.max(v, dim=-1, keepdim=True)[0]
v = torch.exp(v) + 1e-8 # to avoid NaN...
v_sum = torch.sum((v[..., :-1] + v[..., 1:]) / 2 * w, dim=-1, keepdim=True)
return v / v_sum
def piecewise_quadratic_transform(x, w_tilde, v_tilde, inverse=False):
"""Element-wise piecewise-quadratic transformation
Parameters
----------
x : torch.Tensor
*, The variable spans the D-dim unit hypercube ([0,1))
w_tilde : torch.Tensor
* x K defined in the paper
v_tilde : torch.Tensor
* x (K+1) defined in the paper
inverse : bool
forward or inverse
Returns
-------
c : torch.Tensor
*, transformed value
log_j : torch.Tensor
*, log determinant of the Jacobian matrix
"""
w = torch.softmax(w_tilde, dim=-1)
v = weighted_softmax(v_tilde, w)
w_cumsum = torch.cumsum(w, dim=-1)
# force sum = 1
w_cumsum[..., -1] = 1.
w_cumsum_shift = F.pad(w_cumsum, (1,0), 'constant', 0)
cdf = torch.cumsum((v[..., 1:] + v[..., :-1]) / 2 * w, dim=-1)
# force sum = 1
cdf[..., -1] = 1.
cdf_shift = F.pad(cdf, (1,0), 'constant', 0)
if not inverse:
# * x D x 1, (w_cumsum[idx-1] < x <= w_cumsum[idx])
bin_index = torch.searchsorted(w_cumsum, x.unsqueeze(-1))
else:
# * x D x 1, (cdf[idx-1] < x <= cdf[idx])
bin_index = torch.searchsorted(cdf, x.unsqueeze(-1))
w_b = torch.gather(w, -1, bin_index).squeeze(-1)
w_bn1 = torch.gather(w_cumsum_shift, -1, bin_index).squeeze(-1)
v_b = torch.gather(v, -1, bin_index).squeeze(-1)
v_bp1 = torch.gather(v, -1, bin_index + 1).squeeze(-1)
cdf_bn1 = torch.gather(cdf_shift, -1, bin_index).squeeze(-1)
if not inverse:
alpha = (x - w_bn1) / w_b.clamp(min=torch.finfo(w_b.dtype).eps)
c = (alpha ** 2) / 2 * (v_bp1 - v_b) * w_b + alpha * v_b * w_b + cdf_bn1
# just sum of log pdfs
log_j = torch.lerp(v_b, v_bp1, alpha).clamp(min=torch.finfo(c.dtype).eps).log()
# make sure it falls into [0,1)
c = c.clamp(min=torch.finfo(c.dtype).eps, max=1. - torch.finfo(c.dtype).eps)
return c, log_j
else:
# quadratic equation for alpha
# alpha should fall into (0, 1]. Since a, b > 0, the symmetry axis -b/2a < 0 and we should pick the larger root
# skip calculating the log_j in inverse since we don't need it
a = (v_bp1 - v_b) * w_b / 2
b = v_b * w_b
c = cdf_bn1 - x
alpha = (-b + torch.sqrt((b**2) - 4 * a * c)) / (2 * a)
inv = alpha * w_b + w_bn1
# make sure it falls into [0,1)
inv = inv.clamp(min=torch.finfo(c.dtype).eps, max=1. - torch.finfo(inv.dtype).eps)
return inv, None
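# Illustrative shapes (hypothetical, not from this repo): each scalar in x gets
# its own K-bin quadratic spline, so for 10 scalars and K=8 bins:
#   x = torch.rand(10)
#   w_tilde = torch.randn(10, 8)    # * x K
#   v_tilde = torch.randn(10, 9)    # * x (K+1)
#   y, log_j = piecewise_quadratic_transform(x, w_tilde, v_tilde)
#   x_rec, _ = piecewise_quadratic_transform(y, w_tilde, v_tilde, inverse=True)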
| radtts-main | splines.py |
# original source takes from https://github.com/jik876/hifi-gan/
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
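# Example: get_padding(3) == 1 and get_padding(5, dilation=2) == 4, i.e. the
# "same" padding that keeps Conv1d output length equal to input length at stride 1.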
| radtts-main | hifigan_utils.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import torch
import torch.nn as nn
from torch.nn import functional as F
from common import get_mask_from_lengths
def compute_flow_loss(z, log_det_W_list, log_s_list, n_elements, n_dims, mask,
sigma=1.0):
log_det_W_total = 0.0
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s * mask)
if len(log_det_W_list):
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s * mask)
if len(log_det_W_list):
log_det_W_total += log_det_W_list[i]
if len(log_det_W_list):
log_det_W_total *= n_elements
z = z * mask
prior_NLL = torch.sum(z*z)/(2*sigma*sigma)
loss = prior_NLL - log_s_total - log_det_W_total
denom = n_elements * n_dims
loss = loss / denom
loss_prior = prior_NLL / denom
return loss, loss_prior
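# Up to the 1/(n_elements * n_dims) normalization, the loss above is
#   sum((z * mask)**2) / (2 * sigma**2) - sum_i sum(log_s_i * mask)
#   - n_elements * sum_i log|det W_i|,
# and loss_prior reports only the Gaussian prior term.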
def compute_regression_loss(x_hat, x, mask, name=False):
x = x[:, None] if len(x.shape) == 2 else x # add channel dim
mask = mask[:, None] if len(mask.shape) == 2 else mask # add channel dim
assert len(x.shape) == len(mask.shape)
x = x * mask
x_hat = x_hat * mask
if name == 'vpred':
loss = F.binary_cross_entropy_with_logits(x_hat, x, reduction='sum')
else:
loss = F.mse_loss(x_hat, x, reduction='sum')
loss = loss / mask.sum()
loss_dict = {"loss_{}".format(name): loss}
return loss_dict
class AttributePredictionLoss(torch.nn.Module):
def __init__(self, name, model_config, loss_weight, sigma=1.0):
super(AttributePredictionLoss, self).__init__()
self.name = name
self.sigma = sigma
self.model_name = model_config['name']
self.loss_weight = loss_weight
self.n_group_size = 1
if 'n_group_size' in model_config['hparams']:
self.n_group_size = model_config['hparams']['n_group_size']
def forward(self, model_output, lens):
mask = get_mask_from_lengths(lens // self.n_group_size)
mask = mask[:, None].float()
loss_dict = {}
if 'z' in model_output:
n_elements = lens.sum() // self.n_group_size
n_dims = model_output['z'].size(1)
loss, loss_prior = compute_flow_loss(
model_output['z'], model_output['log_det_W_list'],
model_output['log_s_list'], n_elements, n_dims, mask,
self.sigma)
loss_dict = {"loss_{}".format(self.name): (loss, self.loss_weight),
"loss_prior_{}".format(self.name): (loss_prior, 0.0)}
elif 'x_hat' in model_output:
loss_dict = compute_regression_loss(
model_output['x_hat'], model_output['x'], mask, self.name)
for k, v in loss_dict.items():
loss_dict[k] = (v, self.loss_weight)
if len(loss_dict) == 0:
raise Exception("loss not supported")
return loss_dict
class AttentionCTCLoss(torch.nn.Module):
def __init__(self, blank_logprob=-1):
super(AttentionCTCLoss, self).__init__()
self.log_softmax = torch.nn.LogSoftmax(dim=3)
self.blank_logprob = blank_logprob
self.CTCLoss = nn.CTCLoss(zero_infinity=True)
def forward(self, attn_logprob, in_lens, out_lens):
key_lens = in_lens
query_lens = out_lens
attn_logprob_padded = F.pad(
input=attn_logprob, pad=(1, 0, 0, 0, 0, 0, 0, 0),
value=self.blank_logprob)
cost_total = 0.0
for bid in range(attn_logprob.shape[0]):
target_seq = torch.arange(1, key_lens[bid]+1).unsqueeze(0)
curr_logprob = attn_logprob_padded[bid].permute(1, 0, 2)[
:query_lens[bid], :, :key_lens[bid]+1]
curr_logprob = self.log_softmax(curr_logprob[None])[0]
ctc_cost = self.CTCLoss(curr_logprob, target_seq,
input_lengths=query_lens[bid:bid+1],
target_lengths=key_lens[bid:bid+1])
cost_total += ctc_cost
cost = cost_total/attn_logprob.shape[0]
return cost
class AttentionBinarizationLoss(torch.nn.Module):
def __init__(self):
super(AttentionBinarizationLoss, self).__init__()
def forward(self, hard_attention, soft_attention):
log_sum = torch.log(soft_attention[hard_attention == 1]).sum()
return -log_sum / hard_attention.sum()
class RADTTSLoss(torch.nn.Module):
def __init__(self, sigma=1.0, n_group_size=1, dur_model_config=None,
f0_model_config=None, energy_model_config=None,
vpred_model_config=None, loss_weights=None):
super(RADTTSLoss, self).__init__()
self.sigma = sigma
self.n_group_size = n_group_size
self.loss_weights = loss_weights
self.attn_ctc_loss = AttentionCTCLoss(
blank_logprob=loss_weights.get('blank_logprob', -1))
self.loss_fns = {}
if dur_model_config is not None:
self.loss_fns['duration_model_outputs'] = AttributePredictionLoss(
'duration', dur_model_config, loss_weights['dur_loss_weight'])
if f0_model_config is not None:
self.loss_fns['f0_model_outputs'] = AttributePredictionLoss(
'f0', f0_model_config, loss_weights['f0_loss_weight'],
sigma=1.0)
if energy_model_config is not None:
self.loss_fns['energy_model_outputs'] = AttributePredictionLoss(
'energy',
energy_model_config, loss_weights['energy_loss_weight'])
if vpred_model_config is not None:
self.loss_fns['vpred_model_outputs'] = AttributePredictionLoss(
'vpred', vpred_model_config, loss_weights['vpred_loss_weight'])
def forward(self, model_output, in_lens, out_lens):
loss_dict = {}
if len(model_output['z_mel']):
n_elements = out_lens.sum() // self.n_group_size
mask = get_mask_from_lengths(out_lens // self.n_group_size)
mask = mask[:, None].float()
n_dims = model_output['z_mel'].size(1)
loss_mel, loss_prior_mel = compute_flow_loss(
model_output['z_mel'], model_output['log_det_W_list'],
model_output['log_s_list'], n_elements, n_dims, mask,
self.sigma)
loss_dict['loss_mel'] = (loss_mel, 1.0) # loss, weight
loss_dict['loss_prior_mel'] = (loss_prior_mel, 0.0)
ctc_cost = self.attn_ctc_loss(
model_output['attn_logprob'], in_lens, out_lens)
loss_dict['loss_ctc'] = (
ctc_cost, self.loss_weights['ctc_loss_weight'])
for k in model_output:
if k in self.loss_fns:
if model_output[k] is not None and len(model_output[k]) > 0:
t_lens = in_lens if 'dur' in k else out_lens
mout = model_output[k]
for loss_name, v in self.loss_fns[k](mout, t_lens).items():
loss_dict[loss_name] = v
return loss_dict
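# Each loss_dict entry maps a name to a (loss_tensor, weight) pair; a training
# loop would typically sum weight * loss_tensor over the entries to form the
# total objective.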
| radtts-main | loss.py |
# Original source: https://github.com/NVIDIA/waveglow/blob/master/distributed.py
#
# Original license text:
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import torch
import torch.distributed as dist
from torch.autograd import Variable
def reduce_tensor(tensor, num_gpus, reduce_dst=None):
if num_gpus <= 1: # pass-thru
return tensor
rt = tensor.clone()
if reduce_dst is not None:
dist.reduce(rt, reduce_dst, op=dist.ReduceOp.SUM)
else:
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= num_gpus
return rt
def init_distributed(rank, num_gpus, dist_backend, dist_url):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print('> initializing distributed for rank {} out '
'of {}'.format(rank, num_gpus))
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
init_method = 'tcp://'
master_ip = os.getenv('MASTER_ADDR', 'localhost')
master_port = os.getenv('MASTER_PORT', '6000')
init_method += master_ip + ':' + master_port
torch.distributed.init_process_group(backend='nccl',
world_size=num_gpus,
rank=rank,
init_method=init_method)
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].contiguous().view(-1)
flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)
return flat
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
def apply_gradient_allreduce(module):
"""
Modifies existing model to do gradient allreduce, but doesn't change class
so you don't need "module"
"""
if not hasattr(dist, '_backend'):
module.warn_on_half = True
else:
module.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
for p in module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(module.needs_reduction):
module.needs_reduction = False
buckets = {}
for param in module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if module.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
module.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(module.parameters()):
def allreduce_hook(*unused):
Variable._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
dir(param)
def set_needs_reduction(self, input, output):
self.needs_reduction = True
module.register_forward_hook(set_needs_reduction)
return module
| radtts-main | distributed.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# 1x1InvertibleConv and WN based on implementation from WaveGlow https://github.com/NVIDIA/waveglow/blob/master/glow.py
# Original license:
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from torch import nn
from torch.nn import functional as F
from torch.cuda import amp
from torch.cuda.amp import autocast as autocast
import numpy as np
import ast
from splines import (piecewise_linear_transform,
piecewise_linear_inverse_transform,
unbounded_piecewise_quadratic_transform)
from partialconv1d import PartialConv1d as pconv1d
from typing import Tuple
def update_params(config, params):
for param in params:
print(param)
k, v = param.split("=")
try:
v = ast.literal_eval(v)
except:
pass
k_split = k.split('.')
if len(k_split) > 1:
parent_k = k_split[0]
cur_param = ['.'.join(k_split[1:])+"="+str(v)]
update_params(config[parent_k], cur_param)
elif k in config and len(k_split) == 1:
print(f"overriding {k} with {v}")
config[k] = v
else:
print("{}, {} params not updated".format(k, v))
def get_mask_from_lengths(lengths):
"""Constructs binary mask from a 1D torch tensor of input lengths
Args:
lengths (torch.tensor): 1D tensor
Returns:
mask (torch.tensor): num_sequences x max_length binary tensor
"""
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
mask = (ids < lengths.unsqueeze(1)).bool()
return mask
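# Example (needs CUDA, since the arange buffer is allocated on the GPU):
#   get_mask_from_lengths(torch.tensor([2, 4], device='cuda'))
#   -> tensor([[ True,  True, False, False],
#              [ True,  True,  True,  True]], device='cuda:0')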
class ExponentialClass(torch.nn.Module):
def __init__(self):
super(ExponentialClass, self).__init__()
def forward(self, x):
return torch.exp(x)
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear',
use_partial_padding=False, use_weight_norm=False):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.kernel_size = kernel_size
self.dilation = dilation
self.use_partial_padding = use_partial_padding
self.use_weight_norm = use_weight_norm
conv_fn = torch.nn.Conv1d
if self.use_partial_padding:
conv_fn = pconv1d
self.conv = conv_fn(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
if self.use_weight_norm:
self.conv = nn.utils.weight_norm(self.conv)
def forward(self, signal, mask=None):
if self.use_partial_padding:
conv_signal = self.conv(signal, mask)
else:
conv_signal = self.conv(signal)
if mask is not None:
# always re-zero output if mask is
# available to match zero-padding
conv_signal = conv_signal * mask
return conv_signal
class DenseLayer(nn.Module):
def __init__(self, in_dim=1024, sizes=[1024, 1024]):
super(DenseLayer, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=True)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
x = torch.tanh(linear(x))
return x
class LengthRegulator(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, dur):
output = []
for x_i, dur_i in zip(x, dur):
expanded = self.expand(x_i, dur_i)
output.append(expanded)
output = self.pad(output)
return output
def expand(self, x, dur):
output = []
for i, frame in enumerate(x):
expanded_len = int(dur[i] + 0.5)
expanded = frame.expand(expanded_len, -1)
output.append(expanded)
output = torch.cat(output, 0)
return output
def pad(self, x):
output = []
max_len = max([x[i].size(0) for i in range(len(x))])
for i, seq in enumerate(x):
padded = F.pad(
seq, [0, 0, 0, max_len - seq.size(0)], 'constant', 0.0)
output.append(padded)
output = torch.stack(output)
return output
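# Illustrative shapes (hypothetical values): each encoder frame is repeated by
# its rounded duration and the batch is zero-padded to a common length, e.g.
#   x = torch.randn(2, 5, 512)                                   # B x T_text x C
#   dur = torch.tensor([[1., 2., 1., 3., 1.], [2., 2., 2., 0., 0.]])
#   LengthRegulator()(x, dur).shape                              # (2, 8, 512)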
class ConvLSTMLinear(nn.Module):
def __init__(self, in_dim, out_dim, n_layers=2, n_channels=256,
kernel_size=3, p_dropout=0.1, lstm_type='bilstm',
use_linear=True):
super(ConvLSTMLinear, self).__init__()
self.out_dim = out_dim
self.lstm_type = lstm_type
self.use_linear = use_linear
self.dropout = nn.Dropout(p=p_dropout)
convolutions = []
for i in range(n_layers):
conv_layer = ConvNorm(
in_dim if i == 0 else n_channels, n_channels,
kernel_size=kernel_size, stride=1,
padding=int((kernel_size - 1) / 2), dilation=1,
w_init_gain='relu')
conv_layer = torch.nn.utils.weight_norm(
conv_layer.conv, name='weight')
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
if not self.use_linear:
n_channels = out_dim
if self.lstm_type != '':
use_bilstm = False
lstm_channels = n_channels
if self.lstm_type == 'bilstm':
use_bilstm = True
lstm_channels = int(n_channels // 2)
self.bilstm = nn.LSTM(n_channels, lstm_channels, 1,
batch_first=True, bidirectional=use_bilstm)
lstm_norm_fn_pntr = nn.utils.spectral_norm
self.bilstm = lstm_norm_fn_pntr(self.bilstm, 'weight_hh_l0')
if self.lstm_type == 'bilstm':
self.bilstm = lstm_norm_fn_pntr(self.bilstm, 'weight_hh_l0_reverse')
if self.use_linear:
self.dense = nn.Linear(n_channels, out_dim)
def run_padded_sequence(self, context, lens):
context_embedded = []
for b_ind in range(context.size()[0]): # TODO: speed up
curr_context = context[b_ind:b_ind+1, :, :lens[b_ind]].clone()
for conv in self.convolutions:
curr_context = self.dropout(F.relu(conv(curr_context)))
context_embedded.append(curr_context[0].transpose(0, 1))
context = torch.nn.utils.rnn.pad_sequence(
context_embedded, batch_first=True)
return context
def run_unsorted_inputs(self, fn, context, lens):
lens_sorted, ids_sorted = torch.sort(lens, descending=True)
unsort_ids = [0] * lens.size(0)
for i in range(len(ids_sorted)):
unsort_ids[ids_sorted[i]] = i
lens_sorted = lens_sorted.long().cpu()
context = context[ids_sorted]
context = nn.utils.rnn.pack_padded_sequence(
context, lens_sorted, batch_first=True)
context = fn(context)[0]
context = nn.utils.rnn.pad_packed_sequence(
context, batch_first=True)[0]
# map back to original indices
context = context[unsort_ids]
return context
def forward(self, context, lens):
if context.size()[0] > 1:
context = self.run_padded_sequence(context, lens)
# to B, D, T
context = context.transpose(1, 2)
else:
for conv in self.convolutions:
context = self.dropout(F.relu(conv(context)))
if self.lstm_type != '':
context = context.transpose(1, 2)
self.bilstm.flatten_parameters()
if lens is not None:
context = self.run_unsorted_inputs(self.bilstm, context, lens)
else:
context = self.bilstm(context)[0]
context = context.transpose(1, 2)
x_hat = context
if self.use_linear:
x_hat = self.dense(context.transpose(1, 2)).transpose(1, 2)
return x_hat
def infer(self, z, txt_enc, spk_emb):
x_hat = self.forward(txt_enc, spk_emb)['x_hat']
x_hat = self.feature_processing.denormalize(x_hat)
return x_hat
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, encoder_n_convolutions=3, encoder_embedding_dim=512,
encoder_kernel_size=5, norm_fn=nn.BatchNorm1d,
lstm_norm_fn=None):
super(Encoder, self).__init__()
convolutions = []
for _ in range(encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(encoder_embedding_dim,
encoder_embedding_dim,
kernel_size=encoder_kernel_size, stride=1,
padding=int((encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu',
use_partial_padding=True),
norm_fn(encoder_embedding_dim, affine=True))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(encoder_embedding_dim,
int(encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
if lstm_norm_fn is not None:
if 'spectral' in lstm_norm_fn:
print("Applying spectral norm to text encoder LSTM")
lstm_norm_fn_pntr = torch.nn.utils.spectral_norm
elif 'weight' in lstm_norm_fn:
print("Applying weight norm to text encoder LSTM")
lstm_norm_fn_pntr = torch.nn.utils.weight_norm
self.lstm = lstm_norm_fn_pntr(self.lstm, 'weight_hh_l0')
self.lstm = lstm_norm_fn_pntr(self.lstm, 'weight_hh_l0_reverse')
@amp.autocast(False)
def forward(self, x, in_lens):
"""
Args:
x (torch.tensor): N x C x L padded input of text embeddings
in_lens (torch.tensor): 1D tensor of sequence lengths
"""
if x.size()[0] > 1:
x_embedded = []
for b_ind in range(x.size()[0]): # TODO: improve speed
curr_x = x[b_ind:b_ind+1, :, :in_lens[b_ind]].clone()
for conv in self.convolutions:
curr_x = F.dropout(F.relu(conv(curr_x)),
0.5, self.training)
x_embedded.append(curr_x[0].transpose(0, 1))
x = torch.nn.utils.rnn.pad_sequence(x_embedded, batch_first=True)
else:
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
# recent amp change -- change in_lens to int
in_lens = in_lens.int().cpu()
x = nn.utils.rnn.pack_padded_sequence(x, in_lens, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
@amp.autocast(False)
def infer(self, x):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
class Invertible1x1ConvLUS(torch.nn.Module):
def __init__(self, c, cache_inverse=False):
super(Invertible1x1ConvLUS, self).__init__()
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1*W[:, 0]
p, lower, upper = torch.lu_unpack(*torch.lu(W))
self.register_buffer('p', p)
# diagonals of lower will always be 1s anyway
lower = torch.tril(lower, -1)
lower_diag = torch.diag(torch.eye(c, c))
self.register_buffer('lower_diag', lower_diag)
self.lower = nn.Parameter(lower)
self.upper_diag = nn.Parameter(torch.diag(upper))
self.upper = nn.Parameter(torch.triu(upper, 1))
self.cache_inverse = cache_inverse
@amp.autocast(False)
def forward(self, z, inverse=False):
U = torch.triu(self.upper, 1) + torch.diag(self.upper_diag)
L = torch.tril(self.lower, -1) + torch.diag(self.lower_diag)
W = torch.mm(self.p, torch.mm(L, U))
if inverse:
if not hasattr(self, 'W_inverse'):
# inverse computation
W_inverse = W.float().inverse()
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse[..., None]
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
if not self.cache_inverse:
delattr(self, 'W_inverse')
return z
else:
W = W[..., None]
z = F.conv1d(z, W, bias=None, stride=1, padding=0)
log_det_W = torch.sum(torch.log(torch.abs(self.upper_diag)))
return z, log_det_W
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution, and the log determinant
of its weight matrix. If inverse=True it does convolution with
inverse
"""
def __init__(self, c, cache_inverse=False):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1*W[:, 0]
W = W.view(c, c, 1)
self.conv.weight.data = W
self.cache_inverse = cache_inverse
def forward(self, z, inverse=False):
# DO NOT apply n_of_groups, as it doesn't account for padded sequences
W = self.conv.weight.squeeze()
if inverse:
if not hasattr(self, 'W_inverse'):
# Inverse computation
W_inverse = W.float().inverse()
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse[..., None]
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
if not self.cache_inverse:
delattr(self, 'W_inverse')
return z
else:
# Forward computation
log_det_W = torch.logdet(W).clone()
z = self.conv(z)
return z, log_det_W
class SimpleConvNet(torch.nn.Module):
def __init__(self, n_mel_channels, n_context_dim, final_out_channels,
n_layers=2, kernel_size=5, with_dilation=True,
max_channels=1024, zero_init=True, use_partial_padding=True):
super(SimpleConvNet, self).__init__()
self.layers = torch.nn.ModuleList()
self.n_layers = n_layers
in_channels = n_mel_channels + n_context_dim
out_channels = -1
self.use_partial_padding = use_partial_padding
for i in range(n_layers):
dilation = 2 ** i if with_dilation else 1
padding = int((kernel_size*dilation - dilation)/2)
out_channels = min(max_channels, in_channels * 2)
self.layers.append(ConvNorm(in_channels, out_channels,
kernel_size=kernel_size, stride=1,
padding=padding, dilation=dilation,
bias=True, w_init_gain='relu',
use_partial_padding=use_partial_padding))
in_channels = out_channels
self.last_layer = torch.nn.Conv1d(
out_channels, final_out_channels, kernel_size=1)
if zero_init:
self.last_layer.weight.data *= 0
self.last_layer.bias.data *= 0
def forward(self, z_w_context, seq_lens: torch.Tensor = None):
# seq_lens: tensor of sequence lengths
# output should be b x n_mel_channels x z_w_context.shape[2]
mask = None
if seq_lens is not None:
mask = get_mask_from_lengths(seq_lens).unsqueeze(1).float()
for i in range(self.n_layers):
z_w_context = self.layers[i](z_w_context, mask)
z_w_context = torch.relu(z_w_context)
z_w_context = self.last_layer(z_w_context)
return z_w_context
class WN(torch.nn.Module):
"""
Adapted from WN() module in WaveGlow with modifications to variable names
"""
def __init__(self, n_in_channels, n_context_dim, n_layers, n_channels,
kernel_size=5, affine_activation='softplus',
use_partial_padding=True):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels+n_context_dim, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
self.softplus = torch.nn.Softplus()
self.affine_activation = affine_activation
self.use_partial_padding = use_partial_padding
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2*n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size*dilation - dilation)/2)
in_layer = ConvNorm(n_channels, n_channels, kernel_size=kernel_size,
dilation=dilation, padding=padding,
use_partial_padding=use_partial_padding,
use_weight_norm=True)
# in_layer = nn.Conv1d(n_channels, n_channels, kernel_size,
# dilation=dilation, padding=padding)
# in_layer = nn.utils.weight_norm(in_layer)
self.in_layers.append(in_layer)
res_skip_layer = nn.Conv1d(n_channels, n_channels, 1)
res_skip_layer = nn.utils.weight_norm(res_skip_layer)
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input: Tuple[torch.Tensor, torch.Tensor], seq_lens: torch.Tensor = None):
z, context = forward_input
z = torch.cat((z, context), 1) # append context to z as well
z = self.start(z)
output = torch.zeros_like(z)
mask = None
if seq_lens is not None:
mask = get_mask_from_lengths(seq_lens).unsqueeze(1).float()
non_linearity = torch.relu
if self.affine_activation == 'softplus':
non_linearity = self.softplus
for i in range(self.n_layers):
z = non_linearity(self.in_layers[i](z, mask))
res_skip_acts = non_linearity(self.res_skip_layers[i](z))
output = output + res_skip_acts
output = self.end(output) # [B, dim, seq_len]
return output
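# WN note: the zero-initialized `end` conv emits 2*n_in_channels channels that
# the affine coupling layer splits into an unconstrained scale and a bias, so
# each coupling step starts out as (roughly) an identity map.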
# Affine Coupling Layers
class SplineTransformationLayerAR(torch.nn.Module):
def __init__(self, n_in_channels, n_context_dim, n_layers,
affine_model='simple_conv', kernel_size=1, scaling_fn='exp',
affine_activation='softplus', n_channels=1024, n_bins=8,
left=-6, right=6, bottom=-6, top=6, use_quadratic=False):
super(SplineTransformationLayerAR, self).__init__()
self.n_in_channels = n_in_channels # input dimensions
self.left = left
self.right = right
self.bottom = bottom
self.top = top
self.n_bins = n_bins
self.spline_fn = piecewise_linear_transform
self.inv_spline_fn = piecewise_linear_inverse_transform
self.use_quadratic = use_quadratic
if self.use_quadratic:
self.spline_fn = unbounded_piecewise_quadratic_transform
self.inv_spline_fn = unbounded_piecewise_quadratic_transform
self.n_bins = 2 * self.n_bins + 1
final_out_channels = self.n_in_channels * self.n_bins
# autoregressive flow, kernel size 1 and no dilation
self.param_predictor = SimpleConvNet(
n_context_dim, 0, final_out_channels, n_layers,
with_dilation=False, kernel_size=1, zero_init=True,
use_partial_padding=False)
# output is unnormalized bin weights
def normalize(self, z, inverse):
# normalize to [0, 1]
if inverse:
z = (z - self.bottom) / (self.top - self.bottom)
else:
z = (z - self.left) / (self.right - self.left)
return z
def denormalize(self, z, inverse):
if inverse:
z = z * (self.right - self.left) + self.left
else:
z = z * (self.top - self.bottom) + self.bottom
return z
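    # Domain convention: forward inputs live in [left, right] and outputs in
    # [bottom, top]; normalize()/denormalize() map to and from the [0, 1]
    # interval the spline operates on, and inverse=True swaps the two ranges.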
def forward(self, z, context, inverse=False):
b_s, c_s, t_s = z.size(0), z.size(1), z.size(2)
z = self.normalize(z, inverse)
if z.min() < 0.0 or z.max() > 1.0:
print('spline z scaled beyond [0, 1]', z.min(), z.max())
z_reshaped = z.permute(0, 2, 1).reshape(b_s * t_s, -1)
affine_params = self.param_predictor(context)
q_tilde = affine_params.permute(0, 2, 1).reshape(b_s * t_s, c_s, -1)
with amp.autocast(enabled=False):
if self.use_quadratic:
w = q_tilde[:, :, :self.n_bins // 2]
v = q_tilde[:, :, self.n_bins // 2:]
z_tformed, log_s = self.spline_fn(
z_reshaped.float(), w.float(), v.float(), inverse=inverse)
else:
z_tformed, log_s = self.spline_fn(
z_reshaped.float(), q_tilde.float())
z = z_tformed.reshape(b_s, t_s, -1).permute(0, 2, 1)
z = self.denormalize(z, inverse)
if inverse:
return z
log_s = log_s.reshape(b_s, t_s, -1)
log_s = log_s.permute(0, 2, 1)
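        # The constant below accounts for the normalize()/denormalize() affine
        # maps in the change of variables: each of the c_s channels contributes
        # log(top - bottom) - log(right - left) to the log-determinant.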
log_s = log_s + c_s * (np.log(self.top - self.bottom) -
np.log(self.right - self.left))
return z, log_s
class SplineTransformationLayer(torch.nn.Module):
def __init__(self, n_mel_channels, n_context_dim, n_layers,
with_dilation=True, kernel_size=5,
scaling_fn='exp', affine_activation='softplus',
n_channels=1024, n_bins=8, left=-4, right=4, bottom=-4, top=4,
use_quadratic=False):
super(SplineTransformationLayer, self).__init__()
self.n_mel_channels = n_mel_channels # input dimensions
self.half_mel_channels = int(n_mel_channels/2) # half, because we split
self.left = left
self.right = right
self.bottom = bottom
self.top = top
self.n_bins = n_bins
self.spline_fn = piecewise_linear_transform
self.inv_spline_fn = piecewise_linear_inverse_transform
self.use_quadratic = use_quadratic
if self.use_quadratic:
self.spline_fn = unbounded_piecewise_quadratic_transform
self.inv_spline_fn = unbounded_piecewise_quadratic_transform
self.n_bins = 2*self.n_bins+1
final_out_channels = self.half_mel_channels*self.n_bins
self.param_predictor = SimpleConvNet(
self.half_mel_channels, n_context_dim, final_out_channels,
n_layers, with_dilation=with_dilation, kernel_size=kernel_size,
zero_init=False)
# output is unnormalized bin weights
def forward(self, z, context, inverse=False, seq_lens=None):
b_s, c_s, t_s = z.size(0), z.size(1), z.size(2)
# condition on z_0, transform z_1
n_half = self.half_mel_channels
z_0, z_1 = z[:, :n_half], z[:, n_half:]
# normalize to [0,1]
if inverse:
z_1 = (z_1 - self.bottom)/(self.top - self.bottom)
else:
z_1 = (z_1 - self.left)/(self.right - self.left)
z_w_context = torch.cat((z_0, context), 1)
affine_params = self.param_predictor(z_w_context, seq_lens)
z_1_reshaped = z_1.permute(0, 2, 1).reshape(b_s*t_s, -1)
q_tilde = affine_params.permute(0, 2, 1).reshape(
b_s*t_s, n_half, self.n_bins)
with autocast(enabled=False):
if self.use_quadratic:
w = q_tilde[:, :, :self.n_bins//2]
v = q_tilde[:, :, self.n_bins//2:]
z_1_tformed, log_s = self.spline_fn(
z_1_reshaped.float(), w.float(), v.float(),
inverse=inverse)
if not inverse:
log_s = torch.sum(log_s, 1)
else:
if inverse:
z_1_tformed, _dc = self.inv_spline_fn(
z_1_reshaped.float(), q_tilde.float(), False)
else:
z_1_tformed, log_s = self.spline_fn(
z_1_reshaped.float(), q_tilde.float())
z_1 = z_1_tformed.reshape(b_s, t_s, -1).permute(0, 2, 1)
# undo [0, 1] normalization
if inverse:
z_1 = z_1 * (self.right - self.left) + self.left
z = torch.cat((z_0, z_1), dim=1)
return z
else: # training
z_1 = z_1 * (self.top - self.bottom) + self.bottom
z = torch.cat((z_0, z_1), dim=1)
log_s = log_s.reshape(b_s, t_s).unsqueeze(1) + \
n_half*(np.log(self.top - self.bottom) -
np.log(self.right-self.left))
return z, log_s
class AffineTransformationLayer(torch.nn.Module):
def __init__(self, n_mel_channels, n_context_dim, n_layers,
affine_model='simple_conv', with_dilation=True, kernel_size=5,
scaling_fn='exp', affine_activation='softplus',
n_channels=1024, use_partial_padding=False):
super(AffineTransformationLayer, self).__init__()
if affine_model not in ("wavenet", "simple_conv"):
raise Exception("{} affine model not supported".format(affine_model))
if isinstance(scaling_fn, list):
if not all([x in ("translate", "exp", "tanh", "sigmoid") for x in scaling_fn]):
raise Exception("{} scaling fn not supported".format(scaling_fn))
else:
if scaling_fn not in ("translate", "exp", "tanh", "sigmoid"):
raise Exception("{} scaling fn not supported".format(scaling_fn))
self.affine_model = affine_model
self.scaling_fn = scaling_fn
if affine_model == 'wavenet':
self.affine_param_predictor = WN(
int(n_mel_channels/2), n_context_dim, n_layers=n_layers,
n_channels=n_channels, affine_activation=affine_activation,
use_partial_padding=use_partial_padding)
elif affine_model == 'simple_conv':
self.affine_param_predictor = SimpleConvNet(
int(n_mel_channels / 2), n_context_dim, n_mel_channels,
n_layers, with_dilation=with_dilation, kernel_size=kernel_size,
use_partial_padding=use_partial_padding)
self.n_mel_channels = n_mel_channels
def get_scaling_and_logs(self, scale_unconstrained):
if self.scaling_fn == 'translate':
s = torch.exp(scale_unconstrained*0)
log_s = scale_unconstrained*0
elif self.scaling_fn == 'exp':
s = torch.exp(scale_unconstrained)
            log_s = scale_unconstrained  # log(exp(x)) = x
elif self.scaling_fn == 'tanh':
s = torch.tanh(scale_unconstrained) + 1 + 1e-6
log_s = torch.log(s)
elif self.scaling_fn == 'sigmoid':
s = torch.sigmoid(scale_unconstrained + 10) + 1e-6
log_s = torch.log(s)
elif isinstance(self.scaling_fn, list):
s_list, log_s_list = [], []
for i in range(scale_unconstrained.shape[1]):
scaling_i = self.scaling_fn[i]
if scaling_i == 'translate':
                    s_i = torch.exp(scale_unconstrained[:, i]*0)
log_s_i = scale_unconstrained[:, i]*0
elif scaling_i == 'exp':
s_i = torch.exp(scale_unconstrained[:, i])
log_s_i = scale_unconstrained[:, i]
elif scaling_i == 'tanh':
s_i = torch.tanh(scale_unconstrained[:, i]) + 1 + 1e-6
log_s_i = torch.log(s_i)
elif scaling_i == 'sigmoid':
s_i = torch.sigmoid(scale_unconstrained[:, i]) + 1e-6
log_s_i = torch.log(s_i)
s_list.append(s_i[:, None])
log_s_list.append(log_s_i[:, None])
s = torch.cat(s_list, dim=1)
log_s = torch.cat(log_s_list, dim=1)
return s, log_s
def forward(self, z, context, inverse=False, seq_lens=None):
n_half = int(self.n_mel_channels / 2)
z_0, z_1 = z[:, :n_half], z[:, n_half:]
if self.affine_model == 'wavenet':
affine_params = self.affine_param_predictor(
(z_0, context), seq_lens=seq_lens)
elif self.affine_model == 'simple_conv':
z_w_context = torch.cat((z_0, context), 1)
affine_params = self.affine_param_predictor(
z_w_context, seq_lens=seq_lens)
scale_unconstrained = affine_params[:, :n_half, :]
b = affine_params[:, n_half:, :]
s, log_s = self.get_scaling_and_logs(scale_unconstrained)
if inverse:
z_1 = (z_1 - b) / s
z = torch.cat((z_0, z_1), dim=1)
return z
else:
z_1 = s * z_1 + b
z = torch.cat((z_0, z_1), dim=1)
return z, log_s
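# Coupling-layer round-trip sketch for AffineTransformationLayer (illustrative,
# hypothetical sizes): for x of shape [B, 80, T] and context of shape [B, 16, T],
#   layer = AffineTransformationLayer(n_mel_channels=80, n_context_dim=16, n_layers=2)
#   z, log_s = layer(x, context)              # forward
#   x_rec = layer(z, context, inverse=True)   # inverse recovers x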
class ConvAttention(torch.nn.Module):
def __init__(self, n_mel_channels=80, n_text_channels=512,
n_att_channels=80, temperature=1.0):
super(ConvAttention, self).__init__()
self.temperature = temperature
self.softmax = torch.nn.Softmax(dim=3)
self.log_softmax = torch.nn.LogSoftmax(dim=3)
self.key_proj = nn.Sequential(
ConvNorm(n_text_channels, n_text_channels*2, kernel_size=3,
bias=True, w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_text_channels*2, n_att_channels, kernel_size=1,
bias=True))
self.query_proj = nn.Sequential(
ConvNorm(n_mel_channels, n_mel_channels*2, kernel_size=3,
bias=True, w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_mel_channels*2, n_mel_channels, kernel_size=1,
bias=True),
torch.nn.ReLU(),
ConvNorm(n_mel_channels, n_att_channels, kernel_size=1, bias=True)
)
def run_padded_sequence(self, sorted_idx, unsort_idx, lens, padded_data,
recurrent_model):
"""Sorts input data by previded ordering (and un-ordering) and runs the
packed data through the recurrent model
Args:
sorted_idx (torch.tensor): 1D sorting index
unsort_idx (torch.tensor): 1D unsorting index (inverse of sorted_idx)
lens: lengths of input data (sorted in descending order)
padded_data (torch.tensor): input sequences (padded)
recurrent_model (nn.Module): recurrent model to run data through
Returns:
hidden_vectors (torch.tensor): outputs of the RNN, in the original,
unsorted, ordering
"""
# sort the data by decreasing length using provided index
# we assume batch index is in dim=1
padded_data = padded_data[:, sorted_idx]
padded_data = nn.utils.rnn.pack_padded_sequence(padded_data, lens)
hidden_vectors = recurrent_model(padded_data)[0]
hidden_vectors, _ = nn.utils.rnn.pad_packed_sequence(hidden_vectors)
# unsort the results at dim=1 and return
hidden_vectors = hidden_vectors[:, unsort_idx]
return hidden_vectors
def forward(self, queries, keys, query_lens, mask=None, key_lens=None,
attn_prior=None):
"""Attention mechanism for radtts. Unlike in Flowtron, we have no
restrictions such as causality etc, since we only need this during
training.
Args:
queries (torch.tensor): B x C x T1 tensor (likely mel data)
keys (torch.tensor): B x C2 x T2 tensor (text data)
query_lens: lengths for sorting the queries in descending order
mask (torch.tensor): uint8 binary mask for variable length entries
(should be in the T2 domain)
Output:
attn (torch.tensor): B x 1 x T1 x T2 attention mask.
Final dim T2 should sum to 1
"""
temp = 0.0005
keys_enc = self.key_proj(keys) # B x n_attn_dims x T2
# Beware can only do this since query_dim = attn_dim = n_mel_channels
queries_enc = self.query_proj(queries)
        # Gaussian Isotropic Attention
# B x n_attn_dims x T1 x T2
attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None])**2
# compute log-likelihood from gaussian
eps = 1e-8
attn = -temp * attn.sum(1, keepdim=True)
if attn_prior is not None:
attn = self.log_softmax(attn) + torch.log(attn_prior[:, None] + eps)
attn_logprob = attn.clone()
if mask is not None:
attn.data.masked_fill_(
mask.permute(0, 2, 1).unsqueeze(2), -float("inf"))
attn = self.softmax(attn) # softmax along T2
return attn, attn_logprob
| radtts-main | common.py |
# Original source taken from https://github.com/LiyuanLucasLiu/RAdam
#
# Copyright 2019 Liyuan Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
# pylint: disable=no-name-in-module
from torch.optim.optimizer import Optimizer
class RAdam(Optimizer):
"""RAdam optimizer"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0):
"""
Init
:param params: parameters to optimize
:param lr: learning rate
        :param betas: coefficients (beta1, beta2) for the running averages
:param eps: numerical precision
:param weight_decay: weight decay weight
"""
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for _ in range(10)]
super().__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError(
'RAdam does not support sparse gradients'
)
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = (
state['exp_avg_sq'].type_as(p_data_fp32)
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = (
N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
)
buffered[1] = N_sma
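                    # N_sma approximates the length of the SMA of the adaptive
                    # term; the variance-rectified step below is only used once
                    # it is long enough (>= 5), per the RAdam formulation.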
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = (
group['lr'] *
math.sqrt(
(1 - beta2_t) * (N_sma - 4) /
(N_sma_max - 4) * (N_sma - 2) /
N_sma * N_sma_max / (N_sma_max - 2)
) / (1 - beta1 ** state['step'])
)
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
                    p_data_fp32.add_(
                        p_data_fp32, alpha=-group['weight_decay'] * group['lr']
                    )
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size)
p.data.copy_(p_data_fp32)
return loss
| radtts-main | radam.py |
# adapted from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/FastPitch/fastpitch/transformer.py
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from common import get_mask_from_lengths, LinearNorm
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.matmul(torch.unsqueeze(pos_seq, -1),
torch.unsqueeze(self.inv_freq, 0))
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)
if bsz is not None:
return pos_emb[None, :, :].expand(bsz, -1, -1)
else:
return pos_emb[None, :, :]
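# PositionalEmbedding note: standard sinusoidal embedding of width demb; the sin
# and cos halves are concatenated along the feature axis, and the result is
# returned as [1, T, demb] (or expanded to [bsz, T, demb] when bsz is given).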
class PositionwiseConvFF(nn.Module):
def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False):
super(PositionwiseConvFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)),
nn.ReLU(),
# nn.Dropout(dropout), # worse convergence
nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
return self._forward(inp)
def _forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(self.layer_norm(core_out).to(inp.dtype))
core_out = core_out.transpose(1, 2)
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(core_out)
core_out = core_out.transpose(1, 2)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out).to(inp.dtype)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inp, attn_mask=None):
return self._forward(inp, attn_mask)
def _forward(self, inp, attn_mask=None):
residual = inp
if self.pre_lnorm:
# layer normalization
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
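        # Heads are folded into the batch dimension ([n_head * B, T, d_head])
        # so a single bmm computes the attention scores for every head at once.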
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1).to(attn_score.dtype)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask.to(torch.bool), -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(
inp.size(0), inp.size(1), n_head * d_head)
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
# residual connection + layer normalization
output = self.layer_norm(residual + attn_out)
output = output.to(attn_out.dtype)
return output
class TransformerLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout,
**kwargs):
super(TransformerLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout)
def forward(self, dec_inp, mask=None):
output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2))
output *= mask
output = self.pos_ff(output)
output *= mask
return output
class FFTransformer(nn.Module):
def __init__(self, in_dim, out_dim=1, n_layers=6, n_head=1, d_head=64,
d_inner=1024, kernel_size=3, dropout=0.1, dropatt=0.1,
dropemb=0.0):
super(FFTransformer, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.n_head = n_head
self.d_head = d_head
self.pos_emb = PositionalEmbedding(self.in_dim)
self.drop = nn.Dropout(dropemb)
self.layers = nn.ModuleList()
for _ in range(n_layers):
self.layers.append(
TransformerLayer(
n_head, in_dim, d_head, d_inner, kernel_size, dropout,
dropatt=dropatt)
)
self.dense = LinearNorm(in_dim, out_dim)
def forward(self, dec_inp, in_lens):
# B, C, T --> B, T, C
inp = dec_inp.transpose(1, 2)
mask = get_mask_from_lengths(in_lens)[..., None]
pos_seq = torch.arange(inp.size(1), device=inp.device).to(inp.dtype)
pos_emb = self.pos_emb(pos_seq) * mask
out = self.drop(inp + pos_emb)
for layer in self.layers:
out = layer(out, mask=mask)
out = self.dense(out).transpose(1, 2)
return out
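# FFTransformer shape flow: input [B, in_dim, T] is transposed to [B, T, in_dim],
# summed with the (masked) sinusoidal positional embedding, passed through the
# masked self-attention / conv feed-forward layers, and projected to [B, out_dim, T].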
| radtts-main | transformer.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# AR_Back_Step and AR_Step based on implementation from
# https://github.com/NVIDIA/flowtron/blob/master/flowtron.py
# Original license text:
###############################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
# Original Author and Contact: Rafael Valle
# Modification by Rafael Valle
import torch
from torch import nn
from common import DenseLayer, SplineTransformationLayerAR
class AR_Back_Step(torch.nn.Module):
def __init__(self, n_attr_channels, n_speaker_dim, n_text_dim,
n_hidden, n_lstm_layers, scaling_fn, spline_flow_params=None):
super(AR_Back_Step, self).__init__()
self.ar_step = AR_Step(n_attr_channels, n_speaker_dim, n_text_dim,
n_hidden, n_lstm_layers, scaling_fn,
spline_flow_params)
def forward(self, mel, context, lens):
mel = torch.flip(mel, (0, ))
context = torch.flip(context, (0, ))
# backwards flow, send padded zeros back to end
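        # (after the time flip, right-side padding ends up at the front of each
        # sequence; rolling by lens[k] moves the valid frames back to t=0, and
        # the roll is undone after the AR step below)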
for k in range(mel.size(1)):
mel[:, k] = mel[:, k].roll(lens[k].item(), dims=0)
context[:, k] = context[:, k].roll(lens[k].item(), dims=0)
mel, log_s = self.ar_step(mel, context, lens)
# move padded zeros back to beginning
for k in range(mel.size(1)):
mel[:, k] = mel[:, k].roll(-lens[k].item(), dims=0)
return torch.flip(mel, (0, )), log_s
def infer(self, residual, context):
residual = self.ar_step.infer(
torch.flip(residual, (0, )), torch.flip(context, (0, )))
residual = torch.flip(residual, (0, ))
return residual
class AR_Step(torch.nn.Module):
def __init__(self, n_attr_channels, n_speaker_dim, n_text_channels,
n_hidden, n_lstm_layers, scaling_fn, spline_flow_params=None):
super(AR_Step, self).__init__()
if spline_flow_params is not None:
self.spline_flow = SplineTransformationLayerAR(
**spline_flow_params)
else:
self.n_out_dims = n_attr_channels
self.conv = torch.nn.Conv1d(n_hidden, 2*n_attr_channels, 1)
self.conv.weight.data = 0.0 * self.conv.weight.data
self.conv.bias.data = 0.0 * self.conv.bias.data
self.attr_lstm = torch.nn.LSTM(n_attr_channels, n_hidden)
self.lstm = torch.nn.LSTM(n_hidden + n_text_channels + n_speaker_dim,
n_hidden, n_lstm_layers)
if spline_flow_params is None:
self.dense_layer = DenseLayer(in_dim=n_hidden,
sizes=[n_hidden, n_hidden])
self.scaling_fn = scaling_fn
def run_padded_sequence(self, sorted_idx, unsort_idx, lens, padded_data,
recurrent_model):
"""Sorts input data by previded ordering (and un-ordering) and runs the
packed data through the recurrent model
Args:
sorted_idx (torch.tensor): 1D sorting index
unsort_idx (torch.tensor): 1D unsorting index (inverse sorted_idx)
lens: lengths of input data (sorted in descending order)
padded_data (torch.tensor): input sequences (padded)
recurrent_model (nn.Module): recurrent model to run data through
Returns:
hidden_vectors (torch.tensor): outputs of the RNN, in the original,
unsorted, ordering
"""
# sort the data by decreasing length using provided index
# we assume batch index is in dim=1
padded_data = padded_data[:, sorted_idx]
padded_data = nn.utils.rnn.pack_padded_sequence(
padded_data, lens.cpu())
hidden_vectors = recurrent_model(padded_data)[0]
hidden_vectors, _ = nn.utils.rnn.pad_packed_sequence(hidden_vectors)
# unsort the results at dim=1 and return
hidden_vectors = hidden_vectors[:, unsort_idx]
return hidden_vectors
def get_scaling_and_logs(self, scale_unconstrained):
if self.scaling_fn == 'translate':
s = torch.exp(scale_unconstrained*0)
log_s = scale_unconstrained*0
elif self.scaling_fn == 'exp':
s = torch.exp(scale_unconstrained)
log_s = scale_unconstrained # log(exp
elif self.scaling_fn == 'tanh':
s = torch.tanh(scale_unconstrained) + 1 + 1e-6
log_s = torch.log(s)
elif self.scaling_fn == 'sigmoid':
s = torch.sigmoid(scale_unconstrained + 10) + 1e-6
log_s = torch.log(s)
else:
raise Exception("Scaling fn {} not supp.".format(self.scaling_fn))
return s, log_s
def forward(self, mel, context, lens):
dummy = torch.FloatTensor(1, mel.size(1), mel.size(2)).zero_()
dummy = dummy.type(mel.type())
# seq_len x batch x dim
mel0 = torch.cat([dummy, mel[:-1]], 0)
self.lstm.flatten_parameters()
self.attr_lstm.flatten_parameters()
if lens is not None:
# collect decreasing length indices
lens, ids = torch.sort(lens, descending=True)
original_ids = [0] * lens.size(0)
for i, ids_i in enumerate(ids):
original_ids[ids_i] = i
# mel_seq_len x batch x hidden_dim
mel_hidden = self.run_padded_sequence(
ids, original_ids, lens, mel0, self.attr_lstm)
else:
mel_hidden = self.attr_lstm(mel0)[0]
decoder_input = torch.cat((mel_hidden, context), -1)
if lens is not None:
# reorder, run padded sequence and undo reordering
lstm_hidden = self.run_padded_sequence(
ids, original_ids, lens, decoder_input, self.lstm)
else:
lstm_hidden = self.lstm(decoder_input)[0]
if hasattr(self, 'spline_flow'):
# spline flow fn expects inputs to be batch, channel, time
lstm_hidden = lstm_hidden.permute(1, 2, 0)
mel = mel.permute(1, 2, 0)
mel, log_s = self.spline_flow(mel, lstm_hidden, inverse=False)
mel = mel.permute(2, 0, 1)
log_s = log_s.permute(2, 0, 1)
else:
lstm_hidden = self.dense_layer(lstm_hidden).permute(1, 2, 0)
decoder_output = self.conv(lstm_hidden).permute(2, 0, 1)
scale, log_s = self.get_scaling_and_logs(
decoder_output[:, :, :self.n_out_dims])
bias = decoder_output[:, :, self.n_out_dims:]
mel = scale * mel + bias
return mel, log_s
def infer(self, residual, context):
total_output = [] # seems 10FPS faster than pre-allocation
output = None
dummy = torch.cuda.FloatTensor(
1, residual.size(1), residual.size(2)).zero_()
self.attr_lstm.flatten_parameters()
for i in range(0, residual.size(0)):
if i == 0:
output = dummy
mel_hidden, (h, c) = self.attr_lstm(output)
else:
mel_hidden, (h, c) = self.attr_lstm(output, (h, c))
decoder_input = torch.cat((mel_hidden, context[i][None]), -1)
if i == 0:
lstm_hidden, (h1, c1) = self.lstm(decoder_input)
else:
lstm_hidden, (h1, c1) = self.lstm(decoder_input, (h1, c1))
if hasattr(self, 'spline_flow'):
# expects inputs to be batch, channel, time
lstm_hidden = lstm_hidden.permute(1, 2, 0)
output = residual[i:i+1].permute(1, 2, 0)
output = self.spline_flow(output, lstm_hidden, inverse=True)
output = output.permute(2, 0, 1)
else:
lstm_hidden = self.dense_layer(lstm_hidden).permute(1, 2, 0)
decoder_output = self.conv(lstm_hidden).permute(2, 0, 1)
s, log_s = self.get_scaling_and_logs(
decoder_output[:, :, :decoder_output.size(2)//2])
b = decoder_output[:, :, decoder_output.size(2)//2:]
output = (residual[i:i+1] - b)/s
total_output.append(output)
total_output = torch.cat(total_output, 0)
return total_output
| radtts-main | autoregressive_flow.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import json
import os
import hashlib
import torch
from timeit import default_timer as timer
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.cuda import amp
from radam import RAdam
from loss import RADTTSLoss, AttentionBinarizationLoss
from radtts import RADTTS
from data import Data, DataCollate
from plotting_utils import plot_alignment_to_numpy
from common import update_params
import numpy as np
from distributed import (init_distributed, apply_gradient_allreduce,
reduce_tensor)
from torch.utils.data.distributed import DistributedSampler
from inference import load_vocoder
def freeze(model):
for p in model.parameters():
p.requires_grad = False
def unfreeze(model):
for p in model.parameters():
p.requires_grad = True
def prepare_output_folders_and_logger(output_directory):
# Get shared output_directory ready
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
print("output directory", output_directory)
output_config_path = os.path.join(output_directory, 'config.json')
print("saving current configuration in output dir")
config_fp = open(output_config_path, 'w')
json.dump(config, config_fp, indent=4)
config_fp.close()
output_code_path = os.path.join(output_directory, 'code.tar.gz')
os.system('tar -czvf %s *.py' % (output_code_path))
tboard_out_path = os.path.join(output_directory, 'logs')
print("setting up tboard log in %s" % (tboard_out_path))
logger = SummaryWriter(tboard_out_path)
return logger
def prepare_model_weights(model, unfreeze_modules):
if unfreeze_modules != 'all':
freeze(model) # freeze everything
if 'dur' in unfreeze_modules and hasattr(model, 'dur_pred_layer'):
print("Training duration prediction")
unfreeze(model.dur_pred_layer)
if 'f0' in unfreeze_modules and hasattr(model, 'f0_pred_module'):
print("Training F0 prediction")
unfreeze(model.f0_pred_module)
if 'energy' in unfreeze_modules and hasattr(model, 'energy_pred_module'):
print("Training energy prediction")
unfreeze(model.energy_pred_module)
if 'vpred' in unfreeze_modules and hasattr(model, 'v_pred_module'):
print("Training voiced prediction")
unfreeze(model.v_pred_module)
if hasattr(model, 'v_embeddings'):
print("Training voiced embeddings")
unfreeze(model.v_embeddings)
if 'unvbias' in unfreeze_modules and hasattr(model, 'unvoiced_bias_module'):
print("Training unvoiced bias")
unfreeze(model.unvoiced_bias_module)
else:
print("Training everything")
def parse_data_from_batch(batch):
mel = batch['mel']
speaker_ids = batch['speaker_ids']
text = batch['text']
in_lens = batch['input_lengths']
out_lens = batch['output_lengths']
attn_prior = batch['attn_prior']
f0 = batch['f0']
voiced_mask = batch['voiced_mask']
p_voiced = batch['p_voiced']
energy_avg = batch['energy_avg']
audiopaths = batch['audiopaths']
if attn_prior is not None:
attn_prior = attn_prior.cuda()
if f0 is not None:
f0 = f0.cuda()
if voiced_mask is not None:
voiced_mask = voiced_mask.cuda()
if p_voiced is not None:
p_voiced = p_voiced.cuda()
if energy_avg is not None:
energy_avg = energy_avg.cuda()
mel, speaker_ids = mel.cuda(), speaker_ids.cuda()
text = text.cuda()
in_lens, out_lens = in_lens.cuda(), out_lens.cuda()
return (mel, speaker_ids, text, in_lens, out_lens, attn_prior, f0,
voiced_mask, p_voiced, energy_avg, audiopaths)
def prepare_dataloaders(data_config, n_gpus, batch_size):
# Get data, data loaders and collate function ready
ignore_keys = ['training_files', 'validation_files']
print("initializing training dataloader")
trainset = Data(data_config['training_files'],
**dict((k, v) for k, v in data_config.items()
if k not in ignore_keys))
print("initializing validation dataloader")
data_config_val = data_config.copy()
data_config_val['aug_probabilities'] = None # no aug in val set
valset = Data(data_config['validation_files'],
**dict((k, v) for k, v in data_config_val.items()
if k not in ignore_keys), speaker_ids=trainset.speaker_ids)
collate_fn = DataCollate()
train_sampler, shuffle = None, True
if n_gpus > 1:
train_sampler, shuffle = DistributedSampler(trainset), False
train_loader = DataLoader(trainset, num_workers=8, shuffle=shuffle,
sampler=train_sampler, batch_size=batch_size,
pin_memory=False, drop_last=True,
collate_fn=collate_fn)
return train_loader, valset, collate_fn
def warmstart(checkpoint_path, model, include_layers=[],
ignore_layers_warmstart=[]):
pretrained_dict = torch.load(checkpoint_path, map_location='cpu')
pretrained_dict = pretrained_dict['state_dict']
if len(include_layers):
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if any(l in k for l in include_layers)}
if len(ignore_layers_warmstart):
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if all(l not in k for l in ignore_layers_warmstart)}
model_dict = model.state_dict()
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
print("Warm started from {}".format(checkpoint_path))
return model
def load_checkpoint(checkpoint_path, model, optimizer, ignore_layers=[]):
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
iteration = checkpoint_dict['iteration']
model_dict = checkpoint_dict['state_dict']
optimizer.load_state_dict(checkpoint_dict['optimizer'])
model.load_state_dict(model_dict)
print("Loaded checkpoint '{}' (iteration {})" .format(
checkpoint_path, iteration))
return model, optimizer, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'state_dict': model.state_dict(),
'iteration': iteration,
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def compute_validation_loss(iteration, model, criterion, valset, collate_fn,
batch_size, n_gpus, logger=None, train_config=None):
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if n_gpus > 1 else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=8,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
loss_outputs_full = {}
n_batches = len(val_loader)
for i, batch in enumerate(val_loader):
(mel, speaker_ids, text, in_lens, out_lens, attn_prior,
f0, voiced_mask, p_voiced, energy_avg,
audiopaths) = parse_data_from_batch(batch)
outputs = model(
mel, speaker_ids, text, in_lens, out_lens,
binarize_attention=True, attn_prior=attn_prior, f0=f0,
energy_avg=energy_avg, voiced_mask=voiced_mask,
p_voiced=p_voiced)
loss_outputs = criterion(outputs, in_lens, out_lens)
for k, (v, w) in loss_outputs.items():
reduced_v = reduce_tensor(v, n_gpus, 0).item()
if k in loss_outputs_full.keys():
loss_outputs_full[k] += (reduced_v / n_batches)
else:
loss_outputs_full[k] = (reduced_v / n_batches)
if logger is not None:
for k, v in loss_outputs_full.items():
logger.add_scalar('val/'+k, v, iteration)
attn_used = outputs['attn']
attn_soft = outputs['attn_soft']
audioname = os.path.basename(audiopaths[0])
if attn_used is not None:
logger.add_image(
'attention_weights',
plot_alignment_to_numpy(
attn_soft[0, 0].data.cpu().numpy().T, title=audioname),
iteration, dataformats='HWC')
logger.add_image(
'attention_weights_mas',
plot_alignment_to_numpy(
attn_used[0, 0].data.cpu().numpy().T, title=audioname),
iteration, dataformats='HWC')
attribute_sigmas = []
""" NOTE: if training vanilla radtts (no attributes involved),
use log_attribute_samples only, as there will be no ground truth
features available. The infer function in this case will work with
f0=None, energy_avg=None, and voiced_mask=None
"""
if train_config['log_decoder_samples']: # decoder with gt features
attribute_sigmas.append(-1)
if train_config['log_attribute_samples']: # attribute prediction
if model.is_attribute_unconditional():
attribute_sigmas.extend([1.0])
else:
attribute_sigmas.extend([0.1, 0.5, 0.8, 1.0])
if len(attribute_sigmas) > 0:
durations = attn_used[0, 0].sum(0, keepdim=True)
durations = (durations + 0.5).floor().int()
# load vocoder to CPU to avoid taking up valuable GPU vRAM
vocoder_checkpoint_path = train_config['vocoder_checkpoint_path']
vocoder_config_path = train_config['vocoder_config_path']
vocoder, denoiser = load_vocoder(
vocoder_checkpoint_path, vocoder_config_path, to_cuda=False)
for attribute_sigma in attribute_sigmas:
try:
if attribute_sigma > 0.0:
model_output = model.infer(
speaker_ids[0:1], text[0:1], 0.8,
dur=durations, f0=None, energy_avg=None,
voiced_mask=None, sigma_f0=attribute_sigma,
sigma_energy=attribute_sigma)
else:
model_output = model.infer(
speaker_ids[0:1], text[0:1], 0.8,
dur=durations, f0=f0[0:1, :durations.sum()],
energy_avg=energy_avg[0:1, :durations.sum()],
voiced_mask=voiced_mask[0:1, :durations.sum()])
                except Exception:
                    print("Instability or issue occurred during inference, skipping sample generation for TB logger")
continue
mels = model_output['mel']
audio = vocoder(mels.cpu()).float()[0]
audio_denoised = denoiser(
audio, strength=0.00001)[0].float()
audio_denoised = audio_denoised[0].detach().cpu().numpy()
audio_denoised = audio_denoised / np.abs(audio_denoised).max()
if attribute_sigma < 0:
sample_tag = "decoder_sample_gt_attributes"
else:
sample_tag = f"sample_attribute_sigma_{attribute_sigma}"
logger.add_audio(sample_tag, audio_denoised, iteration, data_config['sampling_rate'])
model.train()
return loss_outputs_full
def train(n_gpus, rank, output_directory, epochs, optim_algo, learning_rate,
weight_decay, sigma, iters_per_checkpoint, batch_size, seed,
checkpoint_path, ignore_layers, ignore_layers_warmstart,
include_layers, finetune_layers, warmstart_checkpoint_path,
use_amp, grad_clip_val, loss_weights,
binarization_start_iter=-1, kl_loss_start_iter=-1,
unfreeze_modules="all", **kwargs):
if seed is None:
# convert output directory to seed using a hash
print(output_directory)
seed = hashlib.md5(output_directory.encode()).hexdigest()
seed = int(seed, 16) % 2000
print('Using seed {}'.format(seed))
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if n_gpus > 1:
init_distributed(rank, n_gpus, **dist_config)
criterion = RADTTSLoss(
sigma,
model_config['n_group_size'],
model_config['dur_model_config'],
model_config['f0_model_config'],
model_config['energy_model_config'],
vpred_model_config=model_config['v_model_config'],
loss_weights=loss_weights
)
attention_kl_loss = AttentionBinarizationLoss()
model = RADTTS(**model_config).cuda()
print("Initializing {} optimizer".format(optim_algo))
if len(finetune_layers):
for name, param in model.named_parameters():
if any([l in name for l in finetune_layers]): # short list hack
print("Fine-tuning parameter", name)
param.requires_grad = True
else:
param.requires_grad = False
if optim_algo == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
weight_decay=weight_decay)
elif optim_algo == 'RAdam':
optimizer = RAdam(model.parameters(), lr=learning_rate,
weight_decay=weight_decay)
else:
print("Unrecognized optimizer {}!".format(optim_algo))
exit(1)
# Load checkpoint if one exists
iteration = 0
if warmstart_checkpoint_path != "":
model = warmstart(warmstart_checkpoint_path, model, include_layers,
ignore_layers_warmstart)
if checkpoint_path != "":
model, optimizer, iteration = load_checkpoint(
checkpoint_path, model, optimizer, ignore_layers)
iteration += 1 # next iteration is iteration + 1
if n_gpus > 1:
model = apply_gradient_allreduce(model)
print(model)
scaler = amp.GradScaler(enabled=use_amp)
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
train_loader, valset, collate_fn = prepare_dataloaders(
data_config, n_gpus, batch_size)
if rank == 0:
logger = prepare_output_folders_and_logger(output_directory)
prepare_model_weights(model, unfreeze_modules)
model.train()
epoch_offset = max(0, int(iteration / len(train_loader)))
    # ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, epochs):
print("Epoch: {}".format(epoch))
for batch in train_loader:
tic = timer()
model.zero_grad()
(mel, speaker_ids, text, in_lens, out_lens, attn_prior,
f0, voiced_mask, p_voiced, energy_avg,
audiopaths) = parse_data_from_batch(batch)
if iteration >= binarization_start_iter:
binarize = True # binarization training phase
else:
binarize = False # no binarization, soft alignments only
with amp.autocast(use_amp):
outputs = model(
mel, speaker_ids, text, in_lens, out_lens,
binarize_attention=binarize, attn_prior=attn_prior,
f0=f0, energy_avg=energy_avg,
voiced_mask=voiced_mask, p_voiced=p_voiced)
loss_outputs = criterion(outputs, in_lens, out_lens)
loss = None
for k, (v, w) in loss_outputs.items():
if w > 0:
loss = v * w if loss is None else loss + v * w
w_bin = criterion.loss_weights.get('binarization_loss_weight', 1.0)
if binarize and iteration >= kl_loss_start_iter:
binarization_loss = attention_kl_loss(
outputs['attn'], outputs['attn_soft'])
loss += binarization_loss * w_bin
else:
binarization_loss = torch.zeros_like(loss)
loss_outputs['binarization_loss'] = (binarization_loss, w_bin)
scaler.scale(loss).backward()
if grad_clip_val > 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(
model.parameters(), grad_clip_val)
scaler.step(optimizer)
scaler.update()
toc = timer()
current_lr = optimizer.param_groups[0]['lr']
print_list = ["iter: {} ({:.2f} s) | lr: {}".format(
iteration, toc-tic, current_lr)]
for k, (v, w) in loss_outputs.items():
reduced_v = reduce_tensor(v, n_gpus, 0).item()
loss_outputs[k] = reduced_v
if rank == 0:
print_list.append(' | {}: {:.3f}'.format(k, v))
logger.add_scalar('train/'+k, reduced_v, iteration)
if rank == 0:
print(''.join(print_list), flush=True)
if iteration > -1 and iteration % iters_per_checkpoint == 0:
if rank == 0:
val_loss_outputs = compute_validation_loss(
iteration, model, criterion, valset, collate_fn,
batch_size, n_gpus, logger=logger,
train_config=train_config)
checkpoint_path = "{}/model_{}".format(
output_directory, iteration)
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
print('Validation loss:', val_loss_outputs)
else:
val_loss_outputs = compute_validation_loss(
iteration, model, criterion, valset, collate_fn,
batch_size, n_gpus)
iteration += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-p', '--params', nargs='+', default=[])
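    # each --params entry is expected to be a dotted key=value override
    # (e.g. train_config.output_directory=outdir), applied via update_params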
args = parser.parse_args()
args.rank = 0
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
global config
config = json.loads(data)
update_params(config, args.params)
print(config)
train_config = config["train_config"]
global data_config
data_config = config["data_config"]
global dist_config
dist_config = config["dist_config"]
global model_config
model_config = config["model_config"]
# make sure we have enough augmentation dimensions
if 'n_aug_dims' in model_config.keys() and \
'aug_probabilities' in data_config.keys():
assert(model_config['n_aug_dims'] >= len(data_config['aug_probabilities']))
# Make sure the launcher sets `RANK` and `WORLD_SIZE`.
rank = int(os.getenv('RANK', '0'))
n_gpus = int(os.getenv("WORLD_SIZE", '1'))
print('> got rank {} and world size {} ...'.format(rank, n_gpus))
if n_gpus == 1 and rank != 0:
raise Exception("Doing single GPU training on rank > 0")
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
train(n_gpus, rank, **train_config)
| radtts-main | train.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import argparse
import os
import json
import numpy as np
import torch
from torch.cuda import amp
from scipy.io.wavfile import write
from radtts import RADTTS
from data import Data
from common import update_params
from hifigan_models import Generator
from hifigan_env import AttrDict
from hifigan_denoiser import Denoiser
def lines_to_list(filename):
"""
Takes a text file of filenames and makes a list of filenames
"""
with open(filename, encoding='utf-8') as f:
files = f.readlines()
files = [f.rstrip() for f in files]
return files
def load_vocoder(vocoder_path, config_path, to_cuda=True):
with open(config_path) as f:
data_vocoder = f.read()
config_vocoder = json.loads(data_vocoder)
h = AttrDict(config_vocoder)
if 'blur' in vocoder_path:
config_vocoder['gaussian_blur']['p_blurring'] = 0.5
else:
if 'gaussian_blur' in config_vocoder:
config_vocoder['gaussian_blur']['p_blurring'] = 0.0
else:
config_vocoder['gaussian_blur'] = {'p_blurring': 0.0}
h['gaussian_blur'] = {'p_blurring': 0.0}
state_dict_g = torch.load(vocoder_path, map_location='cpu')['generator']
# load hifigan
vocoder = Generator(h)
vocoder.load_state_dict(state_dict_g)
denoiser = Denoiser(vocoder)
if to_cuda:
vocoder.cuda()
denoiser.cuda()
vocoder.eval()
denoiser.eval()
return vocoder, denoiser
def infer(radtts_path, vocoder_path, vocoder_config_path, text_path, speaker,
speaker_text, speaker_attributes, sigma, sigma_tkndur, sigma_f0,
sigma_energy, f0_mean, f0_std, energy_mean, energy_std,
token_dur_scaling, denoising_strength, n_takes, output_dir, use_amp,
plot, seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
vocoder, denoiser = load_vocoder(vocoder_path, vocoder_config_path)
radtts = RADTTS(**model_config).cuda()
radtts.enable_inverse_cache() # cache inverse matrix for 1x1 invertible convs
checkpoint_dict = torch.load(radtts_path, map_location='cpu')
state_dict = checkpoint_dict['state_dict']
radtts.load_state_dict(state_dict, strict=False)
radtts.eval()
print("Loaded checkpoint '{}')" .format(radtts_path))
ignore_keys = ['training_files', 'validation_files']
trainset = Data(
data_config['training_files'],
**dict((k, v) for k, v in data_config.items() if k not in ignore_keys))
speaker_id = trainset.get_speaker_id(speaker).cuda()
speaker_id_text, speaker_id_attributes = speaker_id, speaker_id
if speaker_text is not None:
speaker_id_text = trainset.get_speaker_id(speaker_text).cuda()
if speaker_attributes is not None:
speaker_id_attributes = trainset.get_speaker_id(
speaker_attributes).cuda()
text_list = lines_to_list(text_path)
os.makedirs(output_dir, exist_ok=True)
for i, text in enumerate(text_list):
if text.startswith("#"):
continue
print("{}/{}: {}".format(i, len(text_list), text))
text = trainset.get_text(text).cuda()[None]
for take in range(n_takes):
with amp.autocast(use_amp):
with torch.no_grad():
outputs = radtts.infer(
speaker_id, text, sigma, sigma_tkndur, sigma_f0,
sigma_energy, token_dur_scaling, token_duration_max=100,
speaker_id_text=speaker_id_text,
speaker_id_attributes=speaker_id_attributes,
f0_mean=f0_mean, f0_std=f0_std, energy_mean=energy_mean,
energy_std=energy_std)
mel = outputs['mel']
audio = vocoder(mel).float()[0]
audio_denoised = denoiser(
audio, strength=denoising_strength)[0].float()
audio = audio[0].cpu().numpy()
audio_denoised = audio_denoised[0].cpu().numpy()
audio_denoised = audio_denoised / np.max(np.abs(audio_denoised))
suffix_path = "{}_{}_{}_durscaling{}_sigma{}_sigmatext{}_sigmaf0{}_sigmaenergy{}".format(
i, take, speaker, token_dur_scaling, sigma, sigma_tkndur, sigma_f0,
sigma_energy)
write("{}/{}_denoised_{}.wav".format(
output_dir, suffix_path, denoising_strength),
data_config['sampling_rate'], audio_denoised)
if plot:
fig, axes = plt.subplots(2, 1, figsize=(10, 6))
axes[0].plot(outputs['f0'].cpu().numpy()[0], label='f0')
axes[1].plot(outputs['energy_avg'].cpu().numpy()[0], label='energy_avg')
for ax in axes:
ax.legend(loc='best')
plt.tight_layout()
fig.savefig("{}/{}_features.png".format(output_dir, suffix_path))
plt.close('all')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, help='JSON file config')
parser.add_argument('-k', '--config_vocoder', type=str, help='vocoder JSON file config')
parser.add_argument('-p', '--params', nargs='+', default=[])
parser.add_argument('-r', '--radtts_path', type=str)
parser.add_argument('-v', '--vocoder_path', type=str)
parser.add_argument('-t', '--text_path', type=str)
parser.add_argument('-s', '--speaker', type=str)
parser.add_argument('--speaker_text', type=str, default=None)
parser.add_argument('--speaker_attributes', type=str, default=None)
parser.add_argument('-d', '--denoising_strength', type=float, default=0.0)
parser.add_argument('-o', "--output_dir", default="results")
parser.add_argument("--sigma", default=0.8, type=float, help="sampling sigma for decoder")
parser.add_argument("--sigma_tkndur", default=0.666, type=float, help="sampling sigma for duration")
parser.add_argument("--sigma_f0", default=1.0, type=float, help="sampling sigma for f0")
parser.add_argument("--sigma_energy", default=1.0, type=float, help="sampling sigma for energy avg")
parser.add_argument("--f0_mean", default=0.0, type=float)
parser.add_argument("--f0_std", default=0.0, type=float)
parser.add_argument("--energy_mean", default=0.0, type=float)
parser.add_argument("--energy_std", default=0.0, type=float)
parser.add_argument("--token_dur_scaling", default=1.00, type=float)
parser.add_argument("--n_takes", default=1, type=int)
parser.add_argument("--use_amp", action="store_true")
parser.add_argument("--plot", action="store_true")
parser.add_argument("--seed", default=1234, type=int)
args = parser.parse_args()
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
global config
config = json.loads(data)
update_params(config, args.params)
data_config = config["data_config"]
global model_config
model_config = config["model_config"]
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
infer(args.radtts_path, args.vocoder_path, args.config_vocoder,
args.text_path, args.speaker, args.speaker_text,
args.speaker_attributes, args.sigma, args.sigma_tkndur, args.sigma_f0,
args.sigma_energy, args.f0_mean, args.f0_std, args.energy_mean,
args.energy_std, args.token_dur_scaling, args.denoising_strength,
args.n_takes, args.output_dir, args.use_amp, args.plot, args.seed)
| radtts-main | inference.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import torch
from audio_processing import STFT
class Denoiser(torch.nn.Module):
""" Removes model bias from audio produced with hifigan """
def __init__(self, hifigan, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros'):
super(Denoiser, self).__init__()
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length)
self.stft = self.stft.to(hifigan.ups[0].weight.device)
if mode == 'zeros':
mel_input = torch.zeros(
(1, 80, 88),
dtype=hifigan.ups[0].weight.dtype,
device=hifigan.ups[0].weight.device)
elif mode == 'normal':
mel_input = torch.randn(
(1, 80, 88),
dtype=hifigan.upsample.weight.dtype,
device=hifigan.upsample.weight.device)
else:
raise Exception("Mode {} if not supported".format(mode))
with torch.no_grad():
bias_audio = hifigan(mel_input).float()[0]
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
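        # bias_spec is the vocoder's response to a silent (all-zero) mel input;
        # forward() subtracts strength * bias_spec from the magnitude STFT and
        # re-synthesizes the audio with the original phases.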
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
| radtts-main | hifigan_denoiser.py |
# Modified partialconv source code based on implementation from
# https://github.com/NVIDIA/partialconv/blob/master/models/partialconv2d.py
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Author & Contact: Guilin Liu ([email protected])
###############################################################################
# Original Author & Contact: Guilin Liu ([email protected])
# Modified by Kevin Shih ([email protected])
import torch
import torch.nn.functional as F
from torch import nn
from typing import Optional
class PartialConv1d(nn.Conv1d):
def __init__(self, *args, **kwargs):
self.multi_channel = False
self.return_mask = False
super(PartialConv1d, self).__init__(*args, **kwargs)
self.weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0])
self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2]
self.last_size = (None, None, None)
self.update_mask = None
self.mask_ratio = None
@torch.jit.ignore
    def forward(self, input: torch.Tensor, mask_in: Optional[torch.Tensor] = None):
"""
input: standard input to a 1D conv
mask_in: binary mask for valid values, same shape as input
"""
assert len(input.shape) == 3
# if a mask is input, or tensor shape changed, update mask ratio
if mask_in is not None or self.last_size != tuple(input.shape):
self.last_size = tuple(input.shape)
with torch.no_grad():
if self.weight_maskUpdater.type() != input.type():
self.weight_maskUpdater = self.weight_maskUpdater.to(input)
if mask_in is None:
mask = torch.ones(1, 1, input.data.shape[2]).to(input)
else:
mask = mask_in
self.update_mask = F.conv1d(mask, self.weight_maskUpdater,
bias=None, stride=self.stride,
padding=self.padding,
dilation=self.dilation, groups=1)
# for mixed precision training, change 1e-8 to 1e-6
self.mask_ratio = self.slide_winsize/(self.update_mask + 1e-6)
self.update_mask = torch.clamp(self.update_mask, 0, 1)
self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask)
raw_out = super(PartialConv1d, self).forward(
torch.mul(input, mask) if mask_in is not None else input)
if self.bias is not None:
bias_view = self.bias.view(1, self.out_channels, 1)
output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view
output = torch.mul(output, self.update_mask)
else:
output = torch.mul(raw_out, self.mask_ratio)
if self.return_mask:
return output, self.update_mask
else:
return output
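# --- Illustrative usage sketch (not part of the original file) ---
# A small, hedged example of driving PartialConv1d with an explicit validity
# mask; the shapes and the masked region below are arbitrary.
if __name__ == "__main__":
    conv = PartialConv1d(1, 8, kernel_size=3, padding=1)
    conv.return_mask = True
    x = torch.randn(2, 1, 100)
    mask = torch.ones(2, 1, 100)
    mask[:, :, 60:] = 0.0                       # mark the tail as padding
    out, updated_mask = conv(x, mask_in=mask)   # out: (2, 8, 100)
    print(out.shape, updated_mask.shape)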
| radtts-main | partialconv1d.py |
# original source taken from https://github.com/jik876/hifi-gan/
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from hifigan_utils import init_weights, get_padding
LRELU_SLOPE = 0.1
class GaussianBlurAugmentation(nn.Module):
def __init__(self, kernel_size, sigmas, p_blurring):
super(GaussianBlurAugmentation, self).__init__()
self.kernel_size = kernel_size
self.sigmas = sigmas
kernels = self.initialize_kernels(kernel_size, sigmas)
self.register_buffer('kernels', kernels)
self.p_blurring = p_blurring
self.conv = F.conv2d
def initialize_kernels(self, kernel_size, sigmas):
mesh_grids = torch.meshgrid(
[
torch.arange(size, dtype=torch.float32)
for size in kernel_size
]
)
kernels = []
for sigma in sigmas:
kernel = 1
sigma = [sigma] * len(kernel_size)
for size, std, mgrid in zip(kernel_size, sigma, mesh_grids):
mean = (size - 1) / 2
kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
torch.exp(-((mgrid - mean) / std) ** 2 / 2)
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / torch.sum(kernel)
# Reshape to depthwise convolutional weight
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1))
kernels.append(kernel[None])
kernels = torch.cat(kernels)
return kernels
def forward(self, x):
if torch.rand(1)[0] > self.p_blurring:
return x
else:
i = torch.randint(len(self.kernels), (1,))[0]
kernel = self.kernels[i]
pad = int((self.kernel_size[0] - 1) / 2)
x = F.pad(x[:, None], (pad, pad, pad, pad), mode='reflect')
x = self.conv(x, weight=kernel)[:, 0]
return x
class ResBlock1(torch.nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.h = h
self.lrelu_slope = LRELU_SLOPE
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c1(xt)
xt = F.leaky_relu(xt, self.lrelu_slope)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.h = h
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
self.lrelu_slope = LRELU_SLOPE
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(torch.nn.Module):
__constants__ = ['lrelu_slope', 'num_kernels', 'num_upsamples', 'p_blur']
def __init__(self, h):
super(Generator, self).__init__()
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))
self.p_blur = h.gaussian_blur['p_blurring']
self.gaussian_blur_fn = None
if self.p_blur > 0.0:
self.gaussian_blur_fn = GaussianBlurAugmentation(h.gaussian_blur['kernel_size'], h.gaussian_blur['sigmas'], self.p_blur)
else:
self.gaussian_blur_fn = nn.Identity()
self.lrelu_slope = LRELU_SLOPE
resblock = ResBlock1 if h.resblock == '1' else ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
k, u, padding=(k-u)//2)))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
resblock_list = nn.ModuleList()
ch = h.upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
resblock_list.append(resblock(h, ch, k, d))
self.resblocks.append(resblock_list)
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def load_state_dict(self, state_dict):
new_state_dict = {}
for k, v in state_dict.items():
new_k = k
if 'resblocks' in k:
parts = k.split(".")
                # only do this if the checkpoint type is older
if len(parts) == 5:
layer = int(parts[1])
new_layer = f"{layer//3}.{layer%3}"
new_k = f"resblocks.{new_layer}.{'.'.join(parts[2:])}"
new_state_dict[new_k] = v
super().load_state_dict(new_state_dict)
def forward(self, x):
if self.p_blur > 0.0:
x = self.gaussian_blur_fn(x)
x = self.conv_pre(x)
for upsample_layer, resblock_group in zip(self.ups, self.resblocks):
x = F.leaky_relu(x, self.lrelu_slope)
x = upsample_layer(x)
xs = torch.zeros(x.shape, dtype=x.dtype, device=x.device)
for resblock in resblock_group:
xs += resblock(x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for group in self.resblocks:
for block in group:
block.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class DiscriminatorP(torch.nn.Module):
__constants__ = ['LRELU_SLOPE']
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.period = period
        norm_f = weight_norm if not use_spectral_norm else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self):
super(MultiPeriodDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
DiscriminatorP(2),
DiscriminatorP(3),
DiscriminatorP(5),
DiscriminatorP(7),
DiscriminatorP(11),
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(torch.nn.Module):
__constants__ = ['LRELU_SLOPE']
def __init__(self, use_spectral_norm=False):
super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if not use_spectral_norm else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(1, 128, 15, 1, padding=7)),
norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(torch.nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True),
DiscriminatorS(),
DiscriminatorS(),
])
self.meanpools = nn.ModuleList([
AvgPool1d(4, 2, padding=2),
AvgPool1d(4, 2, padding=2)
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
def generator_loss(disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
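# --- Illustrative training-step sketch (not part of the original file) ---
# A hedged outline of how the generator, the two discriminators and the loss
# helpers above fit together for one optimization step. The hyperparameters in
# `h` follow the common HiFi-GAN v1 layout but are placeholders, not the
# repository's actual config; a full recipe would also add a mel reconstruction
# loss on the generator side.
if __name__ == "__main__":
    from types import SimpleNamespace
    h = SimpleNamespace(
        resblock='1',
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[8, 8, 2, 2],
        upsample_kernel_sizes=[16, 16, 4, 4],
        upsample_initial_channel=512,
        gaussian_blur={'p_blurring': 0.0})
    generator = Generator(h)
    mpd, msd = MultiPeriodDiscriminator(), MultiScaleDiscriminator()
    mel = torch.randn(2, 80, 32)          # (batch, mel channels, frames)
    y = torch.randn(2, 1, 32 * 256)       # real audio; 256 = prod(upsample_rates)
    y_hat = generator(mel)                # generated audio, same length as y
    # discriminator losses (generator output detached)
    y_df_r, y_df_g, _, _ = mpd(y, y_hat.detach())
    y_ds_r, y_ds_g, _, _ = msd(y, y_hat.detach())
    loss_disc = discriminator_loss(y_df_r, y_df_g)[0] + \
        discriminator_loss(y_ds_r, y_ds_g)[0]
    # generator losses (adversarial + feature matching)
    y_df_r, y_df_g, fmap_f_r, fmap_f_g = mpd(y, y_hat)
    y_ds_r, y_ds_g, fmap_s_r, fmap_s_g = msd(y, y_hat)
    loss_gen = generator_loss(y_df_g)[0] + generator_loss(y_ds_g)[0] + \
        feature_loss(fmap_f_r, fmap_f_g) + feature_loss(fmap_s_r, fmap_s_g)
    print(loss_disc.item(), loss_gen.item())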
| radtts-main | hifigan_models.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import torch
from torch import nn
from common import ConvNorm, Invertible1x1Conv
from common import AffineTransformationLayer, SplineTransformationLayer
from common import ConvLSTMLinear
from transformer import FFTransformer
from autoregressive_flow import AR_Step, AR_Back_Step
def get_attribute_prediction_model(config):
name = config['name']
hparams = config['hparams']
if name == 'dap':
model = DAP(**hparams)
elif name == 'bgap':
model = BGAP(**hparams)
elif name == 'agap':
model = AGAP(**hparams)
else:
raise Exception("{} model is not supported".format(name))
return model
class AttributeProcessing():
def __init__(self, take_log_of_input=False):
        super(AttributeProcessing, self).__init__()
self.take_log_of_input = take_log_of_input
def normalize(self, x):
if self.take_log_of_input:
x = torch.log(x + 1)
return x
def denormalize(self, x):
if self.take_log_of_input:
x = torch.exp(x) - 1
return x
class BottleneckLayerLayer(nn.Module):
def __init__(self, in_dim, reduction_factor, norm='weightnorm',
non_linearity='relu', kernel_size=3, use_partial_padding=False):
super(BottleneckLayerLayer, self).__init__()
self.reduction_factor = reduction_factor
reduced_dim = int(in_dim / reduction_factor)
self.out_dim = reduced_dim
if self.reduction_factor > 1:
fn = ConvNorm(in_dim, reduced_dim, kernel_size=kernel_size,
use_weight_norm=(norm == 'weightnorm'))
if norm == 'instancenorm':
fn = nn.Sequential(
fn, nn.InstanceNorm1d(reduced_dim, affine=True))
self.projection_fn = fn
self.non_linearity = nn.ReLU()
if non_linearity == 'leakyrelu':
            self.non_linearity = nn.LeakyReLU()
def forward(self, x):
if self.reduction_factor > 1:
x = self.projection_fn(x)
x = self.non_linearity(x)
return x
class DAP(nn.Module):
def __init__(self, n_speaker_dim, bottleneck_hparams, take_log_of_input,
arch_hparams, use_transformer=False):
super(DAP, self).__init__()
self.attribute_processing = AttributeProcessing(take_log_of_input)
self.bottleneck_layer = BottleneckLayerLayer(**bottleneck_hparams)
arch_hparams['in_dim'] = self.bottleneck_layer.out_dim + n_speaker_dim
if use_transformer:
self.feat_pred_fn = FFTransformer(**arch_hparams)
else:
self.feat_pred_fn = ConvLSTMLinear(**arch_hparams)
def forward(self, txt_enc, spk_emb, x, lens):
if x is not None:
x = self.attribute_processing.normalize(x)
txt_enc = self.bottleneck_layer(txt_enc)
spk_emb_expanded = spk_emb[..., None].expand(-1, -1, txt_enc.shape[2])
context = torch.cat((txt_enc, spk_emb_expanded), 1)
x_hat = self.feat_pred_fn(context, lens)
outputs = {'x_hat': x_hat, 'x': x}
return outputs
def infer(self, z, txt_enc, spk_emb, lens=None):
x_hat = self.forward(txt_enc, spk_emb, x=None, lens=lens)['x_hat']
x_hat = self.attribute_processing.denormalize(x_hat)
return x_hat
class BGAP(torch.nn.Module):
def __init__(self, n_in_dim, n_speaker_dim, bottleneck_hparams, n_flows,
n_group_size, n_layers, with_dilation,
kernel_size, scaling_fn,
take_log_of_input=False,
n_channels=1024,
use_quadratic=False, n_bins=8, n_spline_steps=2):
super(BGAP, self).__init__()
# assert(n_group_size % 2 == 0)
self.n_flows = n_flows
self.n_group_size = n_group_size
self.transforms = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
self.n_speaker_dim = n_speaker_dim
self.scaling_fn = scaling_fn
self.attribute_processing = AttributeProcessing(take_log_of_input)
self.n_spline_steps = n_spline_steps
self.bottleneck_layer = BottleneckLayerLayer(**bottleneck_hparams)
n_txt_reduced_dim = self.bottleneck_layer.out_dim
context_dim = n_txt_reduced_dim * n_group_size + n_speaker_dim
if self.n_group_size > 1:
self.unfold_params = {'kernel_size': (n_group_size, 1),
'stride': n_group_size,
'padding': 0, 'dilation': 1}
self.unfold = nn.Unfold(**self.unfold_params)
for k in range(n_flows):
self.convinv.append(Invertible1x1Conv(n_in_dim * n_group_size))
if k >= n_flows-self.n_spline_steps:
left = -3
                right = 3
top = 3
bottom = -3
self.transforms.append(SplineTransformationLayer(
n_in_dim * n_group_size, context_dim, n_layers,
with_dilation=with_dilation, kernel_size=kernel_size,
scaling_fn=scaling_fn,
n_channels=n_channels, top=top,
                    bottom=bottom, left=left, right=right,
use_quadratic=use_quadratic, n_bins=n_bins))
else:
self.transforms.append(AffineTransformationLayer(
n_in_dim * n_group_size, context_dim, n_layers,
with_dilation=with_dilation, kernel_size=kernel_size,
scaling_fn=scaling_fn,
affine_model='simple_conv', n_channels=n_channels))
def fold(self, data):
"""Inverse of the self.unfold(data.unsqueeze(-1)) operation used for
the grouping or "squeeze" operation on input
Args:
data: B x C x T tensor of temporal data
"""
output_size = (data.shape[2]*self.n_group_size, 1)
data = nn.functional.fold(
data, output_size=output_size, **self.unfold_params).squeeze(-1)
return data
def preprocess_context(self, txt_emb, speaker_vecs, std_scale=None):
if self.n_group_size > 1:
txt_emb = self.unfold(txt_emb[..., None])
speaker_vecs = speaker_vecs[..., None].expand(-1, -1, txt_emb.shape[2])
context = torch.cat((txt_emb, speaker_vecs), 1)
return context
def forward(self, txt_enc, spk_emb, x, lens):
"""x<tensor>: duration or pitch or energy average"""
assert(txt_enc.size(2) >= x.size(1))
if len(x.shape) == 2:
# add channel dimension
x = x[:, None]
txt_enc = self.bottleneck_layer(txt_enc)
# lens including padded values
lens_grouped = (lens // self.n_group_size).long()
context = self.preprocess_context(txt_enc, spk_emb)
x = self.unfold(x[..., None])
log_s_list, log_det_W_list = [], []
for k in range(self.n_flows):
x, log_s = self.transforms[k](x, context, seq_lens=lens_grouped)
x, log_det_W = self.convinv[k](x)
log_det_W_list.append(log_det_W)
log_s_list.append(log_s)
# prepare outputs
outputs = {'z': x,
'log_det_W_list': log_det_W_list,
'log_s_list': log_s_list}
return outputs
def infer(self, z, txt_enc, spk_emb, seq_lens):
txt_enc = self.bottleneck_layer(txt_enc)
context = self.preprocess_context(txt_enc, spk_emb)
lens_grouped = (seq_lens // self.n_group_size).long()
z = self.unfold(z[..., None])
for k in reversed(range(self.n_flows)):
z = self.convinv[k](z, inverse=True)
z = self.transforms[k].forward(z, context,
inverse=True, seq_lens=lens_grouped)
# z mapped to input domain
x_hat = self.fold(z)
# pad on the way out
return x_hat
class AGAP(torch.nn.Module):
def __init__(self, n_in_dim, n_speaker_dim, n_flows, n_hidden,
n_lstm_layers, bottleneck_hparams, scaling_fn='exp',
take_log_of_input=False, p_dropout=0.0, setup='',
spline_flow_params=None, n_group_size=1):
super(AGAP, self).__init__()
self.flows = torch.nn.ModuleList()
self.n_group_size = n_group_size
self.n_speaker_dim = n_speaker_dim
self.attribute_processing = AttributeProcessing(take_log_of_input)
self.n_in_dim = n_in_dim
self.bottleneck_layer = BottleneckLayerLayer(**bottleneck_hparams)
n_txt_reduced_dim = self.bottleneck_layer.out_dim
if self.n_group_size > 1:
self.unfold_params = {'kernel_size': (n_group_size, 1),
'stride': n_group_size,
'padding': 0, 'dilation': 1}
self.unfold = nn.Unfold(**self.unfold_params)
if spline_flow_params is not None:
spline_flow_params['n_in_channels'] *= self.n_group_size
for i in range(n_flows):
if i % 2 == 0:
self.flows.append(AR_Step(
n_in_dim * n_group_size, n_speaker_dim, n_txt_reduced_dim *
n_group_size, n_hidden, n_lstm_layers, scaling_fn,
spline_flow_params))
else:
self.flows.append(AR_Back_Step(
n_in_dim * n_group_size, n_speaker_dim, n_txt_reduced_dim *
n_group_size, n_hidden, n_lstm_layers, scaling_fn,
spline_flow_params))
def fold(self, data):
"""Inverse of the self.unfold(data.unsqueeze(-1)) operation used for
the grouping or "squeeze" operation on input
Args:
data: B x C x T tensor of temporal data
"""
output_size = (data.shape[2]*self.n_group_size, 1)
data = nn.functional.fold(
data, output_size=output_size, **self.unfold_params).squeeze(-1)
return data
def preprocess_context(self, txt_emb, speaker_vecs):
if self.n_group_size > 1:
txt_emb = self.unfold(txt_emb[..., None])
speaker_vecs = speaker_vecs[..., None].expand(-1, -1, txt_emb.shape[2])
context = torch.cat((txt_emb, speaker_vecs), 1)
return context
def forward(self, txt_emb, spk_emb, x, lens):
"""x<tensor>: duration or pitch or energy average"""
x = x[:, None] if len(x.shape) == 2 else x # add channel dimension
if self.n_group_size > 1:
x = self.unfold(x[..., None])
x = x.permute(2, 0, 1) # permute to time, batch, dims
x = self.attribute_processing.normalize(x)
txt_emb = self.bottleneck_layer(txt_emb)
context = self.preprocess_context(txt_emb, spk_emb)
context = context.permute(2, 0, 1) # permute to time, batch, dims
        lens_grouped = (lens / self.n_group_size).long()
        log_s_list = []
        for i, flow in enumerate(self.flows):
            x, log_s = flow(x, context, lens_grouped)
log_s_list.append(log_s)
x = x.permute(1, 2, 0) # x mapped to z
log_s_list = [log_s_elt.permute(1, 2, 0) for log_s_elt in log_s_list]
outputs = {'z': x, 'log_s_list': log_s_list, 'log_det_W_list': []}
return outputs
def infer(self, z, txt_emb, spk_emb, seq_lens=None):
if self.n_group_size > 1:
n_frames = z.shape[2]
z = self.unfold(z[..., None])
z = z.permute(2, 0, 1) # permute to time, batch, dims
txt_emb = self.bottleneck_layer(txt_emb)
context = self.preprocess_context(txt_emb, spk_emb)
context = context.permute(2, 0, 1) # permute to time, batch, dims
for i, flow in enumerate(reversed(self.flows)):
z = flow.infer(z, context)
x_hat = z.permute(1, 2, 0)
if self.n_group_size > 1:
x_hat = self.fold(x_hat)
if n_frames > x_hat.shape[2]:
m = nn.ReflectionPad1d((0, n_frames - x_hat.shape[2]))
x_hat = m(x_hat)
x_hat = self.attribute_processing.denormalize(x_hat)
return x_hat
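# --- Illustrative sketch (not part of the original file) ---
# A hedged example of the AttributeProcessing log-scaling round trip, plus a
# comment sketch of the config layout expected by get_attribute_prediction_model.
# The values shown are placeholders, not taken from the repository configs.
if __name__ == "__main__":
    ap = AttributeProcessing(take_log_of_input=True)
    x = torch.rand(2, 80) * 100.0
    x_round_trip = ap.denormalize(ap.normalize(x))
    print(torch.allclose(x, x_round_trip, atol=1e-3))  # expected: True
    # get_attribute_prediction_model expects a dict shaped roughly like:
    # config = {'name': 'dap',        # or 'bgap' / 'agap'
    #           'hparams': {...}}     # constructor kwargs of the chosen class
    # model = get_attribute_prediction_model(config)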
| radtts-main | attribute_prediction_model.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Based on https://github.com/NVIDIA/flowtron/blob/master/data.py
# Original license text:
###############################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import os
import argparse
import json
import numpy as np
import lmdb
import pickle as pkl
import torch
import torch.utils.data
from scipy.io.wavfile import read
from audio_processing import TacotronSTFT
from tts_text_processing.text_processing import TextProcessing
from scipy.stats import betabinom
from librosa import pyin
from common import update_params
from scipy.ndimage import distance_transform_edt as distance_transform
def beta_binomial_prior_distribution(phoneme_count, mel_count,
scaling_factor=0.05):
P = phoneme_count
M = mel_count
x = np.arange(0, P)
mel_text_probs = []
for i in range(1, M+1):
a, b = scaling_factor*i, scaling_factor*(M+1-i)
rv = betabinom(P - 1, a, b)
mel_i_prob = rv.pmf(x)
mel_text_probs.append(mel_i_prob)
return torch.tensor(np.array(mel_text_probs))
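# --- Illustrative helper (not part of the original file) ---
# A hedged sanity check of the attention prior: the result has shape
# (mel_count, phoneme_count) and each mel frame's row is a probability
# distribution over the text tokens.
def _example_attn_prior_shape(phoneme_count=5, mel_count=20):
    prior = beta_binomial_prior_distribution(phoneme_count, mel_count)
    return prior.shape, prior.sum(dim=1)  # torch.Size([20, 5]), rows ~1.0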
def load_wav_to_torch(full_path):
""" Loads wavdata into torch array """
sampling_rate, data = read(full_path)
return torch.from_numpy(np.array(data)).float(), sampling_rate
class Data(torch.utils.data.Dataset):
def __init__(self, datasets, filter_length, hop_length, win_length,
sampling_rate, n_mel_channels, mel_fmin, mel_fmax, f0_min,
f0_max, max_wav_value, use_f0, use_energy_avg, use_log_f0,
use_scaled_energy, symbol_set, cleaner_names, heteronyms_path,
phoneme_dict_path, p_phoneme, handle_phoneme='word',
handle_phoneme_ambiguous='ignore', speaker_ids=None,
include_speakers=None, n_frames=-1,
use_attn_prior_masking=True, prepend_space_to_text=True,
append_space_to_text=True, add_bos_eos_to_text=False,
betabinom_cache_path="", betabinom_scaling_factor=0.05,
lmdb_cache_path="", dur_min=None, dur_max=None,
combine_speaker_and_emotion=False, **kwargs):
self.combine_speaker_and_emotion = combine_speaker_and_emotion
self.max_wav_value = max_wav_value
self.audio_lmdb_dict = {} # dictionary of lmdbs for audio data
self.data = self.load_data(datasets)
self.distance_tx_unvoiced = False
if 'distance_tx_unvoiced' in kwargs.keys():
self.distance_tx_unvoiced = kwargs['distance_tx_unvoiced']
self.stft = TacotronSTFT(filter_length=filter_length,
hop_length=hop_length,
win_length=win_length,
sampling_rate=sampling_rate,
n_mel_channels=n_mel_channels,
mel_fmin=mel_fmin, mel_fmax=mel_fmax)
self.do_mel_scaling = kwargs.get('do_mel_scaling', True)
self.mel_noise_scale = kwargs.get('mel_noise_scale', 0.0)
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.mel_fmin = mel_fmin
self.mel_fmax = mel_fmax
self.f0_min = f0_min
self.f0_max = f0_max
self.use_f0 = use_f0
self.use_log_f0 = use_log_f0
self.use_energy_avg = use_energy_avg
self.use_scaled_energy = use_scaled_energy
self.sampling_rate = sampling_rate
self.tp = TextProcessing(
symbol_set, cleaner_names, heteronyms_path, phoneme_dict_path,
p_phoneme=p_phoneme, handle_phoneme=handle_phoneme,
handle_phoneme_ambiguous=handle_phoneme_ambiguous,
prepend_space_to_text=prepend_space_to_text,
append_space_to_text=append_space_to_text,
add_bos_eos_to_text=add_bos_eos_to_text)
self.dur_min = dur_min
self.dur_max = dur_max
if speaker_ids is None or speaker_ids == '':
self.speaker_ids = self.create_speaker_lookup_table(self.data)
else:
self.speaker_ids = speaker_ids
print("Number of files", len(self.data))
if include_speakers is not None:
for (speaker_set, include) in include_speakers:
self.filter_by_speakers_(speaker_set, include)
print("Number of files after speaker filtering", len(self.data))
if dur_min is not None and dur_max is not None:
self.filter_by_duration_(dur_min, dur_max)
print("Number of files after duration filtering", len(self.data))
self.use_attn_prior_masking = bool(use_attn_prior_masking)
self.prepend_space_to_text = bool(prepend_space_to_text)
self.append_space_to_text = bool(append_space_to_text)
self.betabinom_cache_path = betabinom_cache_path
self.betabinom_scaling_factor = betabinom_scaling_factor
self.lmdb_cache_path = lmdb_cache_path
if self.lmdb_cache_path != "":
self.cache_data_lmdb = lmdb.open(
self.lmdb_cache_path, readonly=True, max_readers=1024,
lock=False).begin()
# make sure caching path exists
if not os.path.exists(self.betabinom_cache_path):
os.makedirs(self.betabinom_cache_path)
print("Dataloader initialized with no augmentations")
self.speaker_map = None
if 'speaker_map' in kwargs:
self.speaker_map = kwargs['speaker_map']
def load_data(self, datasets, split='|'):
dataset = []
for dset_name, dset_dict in datasets.items():
folder_path = dset_dict['basedir']
audiodir = dset_dict['audiodir']
filename = dset_dict['filelist']
audio_lmdb_key = None
if 'lmdbpath' in dset_dict.keys() and len(dset_dict['lmdbpath']) > 0:
self.audio_lmdb_dict[dset_name] = lmdb.open(
dset_dict['lmdbpath'], readonly=True, max_readers=256,
lock=False).begin()
audio_lmdb_key = dset_name
wav_folder_prefix = os.path.join(folder_path, audiodir)
filelist_path = os.path.join(folder_path, filename)
with open(filelist_path, encoding='utf-8') as f:
data = [line.strip().split(split) for line in f]
for d in data:
emotion = 'other' if len(d) == 3 else d[3]
duration = -1 if len(d) == 3 else d[4]
dataset.append(
{'audiopath': os.path.join(wav_folder_prefix, d[0]),
'text': d[1],
'speaker': d[2] + '-' + emotion if self.combine_speaker_and_emotion else d[2],
'emotion': emotion,
'duration': float(duration),
'lmdb_key': audio_lmdb_key
})
return dataset
def filter_by_speakers_(self, speakers, include=True):
print("Include spaker {}: {}".format(speakers, include))
if include:
self.data = [x for x in self.data if x['speaker'] in speakers]
else:
self.data = [x for x in self.data if x['speaker'] not in speakers]
def filter_by_duration_(self, dur_min, dur_max):
self.data = [
x for x in self.data
if x['duration'] == -1 or (
x['duration'] >= dur_min and x['duration'] <= dur_max)]
def create_speaker_lookup_table(self, data):
speaker_ids = np.sort(np.unique([x['speaker'] for x in data]))
d = {speaker_ids[i]: i for i in range(len(speaker_ids))}
print("Number of speakers:", len(d))
print("Speaker IDS", d)
return d
def f0_normalize(self, x):
if self.use_log_f0:
mask = x >= self.f0_min
x[mask] = torch.log(x[mask])
x[~mask] = 0.0
return x
def f0_denormalize(self, x):
if self.use_log_f0:
log_f0_min = np.log(self.f0_min)
mask = x >= log_f0_min
x[mask] = torch.exp(x[mask])
x[~mask] = 0.0
x[x <= 0.0] = 0.0
return x
def energy_avg_normalize(self, x):
if self.use_scaled_energy:
x = (x + 20.0) / 20.0
return x
def energy_avg_denormalize(self, x):
if self.use_scaled_energy:
x = x * 20.0 - 20.0
return x
def get_f0_pvoiced(self, audio, sampling_rate=22050, frame_length=1024,
hop_length=256, f0_min=100, f0_max=300):
audio_norm = audio / self.max_wav_value
f0, voiced_mask, p_voiced = pyin(
audio_norm, f0_min, f0_max, sampling_rate,
frame_length=frame_length, win_length=frame_length // 2,
hop_length=hop_length)
f0[~voiced_mask] = 0.0
f0 = torch.FloatTensor(f0)
p_voiced = torch.FloatTensor(p_voiced)
voiced_mask = torch.FloatTensor(voiced_mask)
return f0, voiced_mask, p_voiced
def get_energy_average(self, mel):
energy_avg = mel.mean(0)
energy_avg = self.energy_avg_normalize(energy_avg)
return energy_avg
def get_mel(self, audio):
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
if self.do_mel_scaling:
melspec = (melspec + 5.5) / 2
if self.mel_noise_scale > 0:
melspec += torch.randn_like(melspec) * self.mel_noise_scale
return melspec
def get_speaker_id(self, speaker):
if self.speaker_map is not None and speaker in self.speaker_map:
speaker = self.speaker_map[speaker]
return torch.LongTensor([self.speaker_ids[speaker]])
def get_text(self, text):
text = self.tp.encode_text(text)
text = torch.LongTensor(text)
return text
def get_attention_prior(self, n_tokens, n_frames):
# cache the entire attn_prior by filename
if self.use_attn_prior_masking:
filename = "{}_{}".format(n_tokens, n_frames)
prior_path = os.path.join(self.betabinom_cache_path, filename)
prior_path += "_prior.pth"
if self.lmdb_cache_path != "":
attn_prior = pkl.loads(
self.cache_data_lmdb.get(prior_path.encode('ascii')))
elif os.path.exists(prior_path):
attn_prior = torch.load(prior_path)
else:
attn_prior = beta_binomial_prior_distribution(
n_tokens, n_frames, self.betabinom_scaling_factor)
torch.save(attn_prior, prior_path)
else:
attn_prior = torch.ones(n_frames, n_tokens) # all ones baseline
return attn_prior
def __getitem__(self, index):
data = self.data[index]
audiopath, text = data['audiopath'], data['text']
speaker_id = data['speaker']
if data['lmdb_key'] is not None:
data_dict = pkl.loads(
self.audio_lmdb_dict[data['lmdb_key']].get(
audiopath.encode('ascii')))
audio = data_dict['audio']
sampling_rate = data_dict['sampling_rate']
else:
audio, sampling_rate = load_wav_to_torch(audiopath)
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
mel = self.get_mel(audio)
f0 = None
p_voiced = None
voiced_mask = None
if self.use_f0:
filename = '_'.join(audiopath.split('/')[-3:])
f0_path = os.path.join(self.betabinom_cache_path, filename)
f0_path += "_f0_sr{}_fl{}_hl{}_f0min{}_f0max{}_log{}.pt".format(
self.sampling_rate, self.filter_length, self.hop_length,
self.f0_min, self.f0_max, self.use_log_f0)
dikt = None
if len(self.lmdb_cache_path) > 0:
dikt = pkl.loads(
self.cache_data_lmdb.get(f0_path.encode('ascii')))
f0 = dikt['f0']
p_voiced = dikt['p_voiced']
voiced_mask = dikt['voiced_mask']
elif os.path.exists(f0_path):
try:
dikt = torch.load(f0_path)
                except Exception:
print(f"f0 loading from {f0_path} is broken, recomputing.")
if dikt is not None:
f0 = dikt['f0']
p_voiced = dikt['p_voiced']
voiced_mask = dikt['voiced_mask']
else:
f0, voiced_mask, p_voiced = self.get_f0_pvoiced(
audio.cpu().numpy(), self.sampling_rate,
self.filter_length, self.hop_length, self.f0_min,
self.f0_max)
print("saving f0 to {}".format(f0_path))
torch.save({'f0': f0,
'voiced_mask': voiced_mask,
'p_voiced': p_voiced}, f0_path)
if f0 is None:
raise Exception("STOP, BROKEN F0 {}".format(audiopath))
f0 = self.f0_normalize(f0)
if self.distance_tx_unvoiced:
mask = f0 <= 0.0
distance_map = np.log(distance_transform(mask))
distance_map[distance_map <= 0] = 0.0
f0 = f0 - distance_map
energy_avg = None
if self.use_energy_avg:
energy_avg = self.get_energy_average(mel)
if self.use_scaled_energy and energy_avg.min() < 0.0:
print(audiopath, "has scaled energy avg smaller than 0")
speaker_id = self.get_speaker_id(speaker_id)
text_encoded = self.get_text(text)
attn_prior = self.get_attention_prior(
text_encoded.shape[0], mel.shape[1])
if not self.use_attn_prior_masking:
attn_prior = None
return {'mel': mel,
'speaker_id': speaker_id,
'text_encoded': text_encoded,
'audiopath': audiopath,
'attn_prior': attn_prior,
'f0': f0,
'p_voiced': p_voiced,
'voiced_mask': voiced_mask,
'energy_avg': energy_avg,
}
def __len__(self):
return len(self.data)
class DataCollate():
""" Zero-pads model inputs and targets given number of steps """
def __init__(self, n_frames_per_step=1):
self.n_frames_per_step = n_frames_per_step
def __call__(self, batch):
"""Collate from normalized data """
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x['text_encoded']) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]]['text_encoded']
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mel_channels = batch[0]['mel'].size(0)
max_target_len = max([x['mel'].size(1) for x in batch])
# include mel padded, gate padded and speaker ids
mel_padded = torch.FloatTensor(len(batch), num_mel_channels, max_target_len)
mel_padded.zero_()
f0_padded = None
p_voiced_padded = None
voiced_mask_padded = None
energy_avg_padded = None
if batch[0]['f0'] is not None:
f0_padded = torch.FloatTensor(len(batch), max_target_len)
f0_padded.zero_()
if batch[0]['p_voiced'] is not None:
p_voiced_padded = torch.FloatTensor(len(batch), max_target_len)
p_voiced_padded.zero_()
if batch[0]['voiced_mask'] is not None:
voiced_mask_padded = torch.FloatTensor(len(batch), max_target_len)
voiced_mask_padded.zero_()
if batch[0]['energy_avg'] is not None:
energy_avg_padded = torch.FloatTensor(len(batch), max_target_len)
energy_avg_padded.zero_()
attn_prior_padded = torch.FloatTensor(len(batch), max_target_len, max_input_len)
attn_prior_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
speaker_ids = torch.LongTensor(len(batch))
audiopaths = []
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]]['mel']
mel_padded[i, :, :mel.size(1)] = mel
if batch[ids_sorted_decreasing[i]]['f0'] is not None:
f0 = batch[ids_sorted_decreasing[i]]['f0']
f0_padded[i, :len(f0)] = f0
if batch[ids_sorted_decreasing[i]]['voiced_mask'] is not None:
voiced_mask = batch[ids_sorted_decreasing[i]]['voiced_mask']
voiced_mask_padded[i, :len(f0)] = voiced_mask
if batch[ids_sorted_decreasing[i]]['p_voiced'] is not None:
p_voiced = batch[ids_sorted_decreasing[i]]['p_voiced']
p_voiced_padded[i, :len(f0)] = p_voiced
if batch[ids_sorted_decreasing[i]]['energy_avg'] is not None:
energy_avg = batch[ids_sorted_decreasing[i]]['energy_avg']
energy_avg_padded[i, :len(energy_avg)] = energy_avg
output_lengths[i] = mel.size(1)
speaker_ids[i] = batch[ids_sorted_decreasing[i]]['speaker_id']
audiopath = batch[ids_sorted_decreasing[i]]['audiopath']
audiopaths.append(audiopath)
cur_attn_prior = batch[ids_sorted_decreasing[i]]['attn_prior']
if cur_attn_prior is None:
attn_prior_padded = None
else:
attn_prior_padded[i, :cur_attn_prior.size(0), :cur_attn_prior.size(1)] = cur_attn_prior
return {'mel': mel_padded,
'speaker_ids': speaker_ids,
'text': text_padded,
'input_lengths': input_lengths,
'output_lengths': output_lengths,
'audiopaths': audiopaths,
'attn_prior': attn_prior_padded,
'f0': f0_padded,
'p_voiced': p_voiced_padded,
'voiced_mask': voiced_mask_padded,
'energy_avg': energy_avg_padded
}
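# --- Illustrative wiring sketch (not part of the original file) ---
# A hedged example of pairing Data with DataCollate in a PyTorch DataLoader.
# `data_config` is assumed to have the same layout as in the __main__ block
# below; batch size and worker count are arbitrary placeholders.
def build_example_loader(data_config, batch_size=4, num_workers=2):
    ignore_keys = ['training_files', 'validation_files']
    trainset = Data(data_config['training_files'],
                    **{k: v for k, v in data_config.items()
                       if k not in ignore_keys})
    return torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True,
        num_workers=num_workers, collate_fn=DataCollate(), drop_last=True)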
# ===================================================================
# Takes directory of clean audio and makes directory of spectrograms
# Useful for making test sets
# ===================================================================
if __name__ == "__main__":
# Get defaults so it can work with no Sacred
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-p', '--params', nargs='+', default=[])
args = parser.parse_args()
args.rank = 0
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
config = json.loads(data)
update_params(config, args.params)
print(config)
data_config = config["data_config"]
ignore_keys = ['training_files', 'validation_files']
trainset = Data(data_config['training_files'],
**dict((k, v) for k, v in data_config.items()
if k not in ignore_keys))
valset = Data(data_config['validation_files'],
**dict((k, v) for k, v in data_config.items()
if k not in ignore_keys), speaker_ids=trainset.speaker_ids)
collate_fn = DataCollate()
for dataset in (trainset, valset):
for i, batch in enumerate(dataset):
out = batch
print("{}/{}".format(i, len(dataset)))
| radtts-main | data.py |
""" adapted from https://github.com/keithito/tacotron """
import re
_letters_and_numbers_re = re.compile(
r"((?:[a-zA-Z]+[0-9]|[0-9]+[a-zA-Z])[a-zA-Z0-9']*)", re.IGNORECASE)
_hardware_re = re.compile(
    r'([0-9]+(?:[.,][0-9]+)?)(?:\s?)(tb|gb|mb|kb|ghz|mhz|khz|hz|mm)', re.IGNORECASE)
_hardware_key = {'tb': 'terabyte',
'gb': 'gigabyte',
'mb': 'megabyte',
'kb': 'kilobyte',
'ghz': 'gigahertz',
'mhz': 'megahertz',
'khz': 'kilohertz',
'hz': 'hertz',
'mm': 'millimeter',
'cm': 'centimeter',
'km': 'kilometer'}
_dimension_re = re.compile(
r'\b(\d+(?:[,.]\d+)?\s*[xX]\s*\d+(?:[,.]\d+)?\s*[xX]\s*\d+(?:[,.]\d+)?(?:in|inch|m)?)\b|\b(\d+(?:[,.]\d+)?\s*[xX]\s*\d+(?:[,.]\d+)?(?:in|inch|m)?)\b')
_dimension_key = {'m': 'meter',
'in': 'inch',
'inch': 'inch'}
def _expand_letters_and_numbers(m):
text = re.split(r'(\d+)', m.group(0))
# remove trailing space
if text[-1] == '':
text = text[:-1]
elif text[0] == '':
text = text[1:]
# if not like 1920s, or AK47's , 20th, 1st, 2nd, 3rd, etc...
if text[-1] in ("'s", "s", "th", "nd", "st", "rd") and text[-2].isdigit():
text[-2] = text[-2] + text[-1]
text = text[:-1]
# for combining digits 2 by 2
new_text = []
for i in range(len(text)):
string = text[i]
if string.isdigit() and len(string) < 5:
# heuristics
if len(string) > 2 and string[-2] == '0':
if string[-1] == '0':
string = [string]
else:
string = [string[:-3], string[-2], string[-1]]
elif len(string) % 2 == 0:
string = [string[i:i+2] for i in range(0, len(string), 2)]
elif len(string) > 2:
string = [string[0]] + [string[i:i+2] for i in range(1, len(string), 2)]
new_text.extend(string)
else:
new_text.append(string)
text = new_text
text = " ".join(text)
return text
def _expand_hardware(m):
quantity, measure = m.groups(0)
measure = _hardware_key[measure.lower()]
if measure[-1] != 'z' and float(quantity.replace(',', '')) > 1:
return "{} {}s".format(quantity, measure)
return "{} {}".format(quantity, measure)
def _expand_dimension(m):
text = "".join([x for x in m.groups(0) if x != 0])
text = text.replace(' x ', ' by ')
text = text.replace('x', ' by ')
if text.endswith(tuple(_dimension_key.keys())):
if text[-2].isdigit():
text = "{} {}".format(text[:-1], _dimension_key[text[-1:]])
elif text[-3].isdigit():
text = "{} {}".format(text[:-2], _dimension_key[text[-2:]])
return text
def normalize_letters_and_numbers(text):
text = re.sub(_hardware_re, _expand_hardware, text)
text = re.sub(_dimension_re, _expand_dimension, text)
text = re.sub(_letters_and_numbers_re, _expand_letters_and_numbers, text)
return text
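# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example of normalize_letters_and_numbers; exact outputs depend on
# the heuristics above, so the strings here are only indicative inputs.
if __name__ == "__main__":
    for sample in ["a 2GB card", "a 3.5mm jack", "model RTX3090"]:
        print(sample, "->", normalize_letters_and_numbers(sample))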
| radtts-main | tts_text_processing/letters_and_numbers.py |
""" adapted from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
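# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example of ARPAbet lookup; the dictionary path is a placeholder and
# must point to a local copy of CMUdict.
if __name__ == "__main__":
    cmudict = CMUDict("cmudict-0.7b.txt")    # hypothetical local path
    print(len(cmudict))
    print(cmudict.lookup("hello"))           # e.g. ['HH AH0 L OW1'] if present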
| radtts-main | tts_text_processing/cmudict.py |
import re
_no_period_re = re.compile(r'(No[.])(?=[ ]?[0-9])')
_percent_re = re.compile(r'([ ]?[%])')
_half_re = re.compile('([0-9]½)|(½)')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('ms', 'miss'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def _expand_no_period(m):
word = m.group(0)
if word[0] == 'N':
return 'Number'
return 'number'
def _expand_percent(m):
return ' percent'
def _expand_half(m):
word = m.group(1)
if word is None:
return 'half'
return word[0] + ' and a half'
def normalize_abbreviations(text):
text = re.sub(_no_period_re, _expand_no_period, text)
text = re.sub(_percent_re, _expand_percent, text)
text = re.sub(_half_re, _expand_half, text)
return text
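# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example of normalize_abbreviations. Note that only "No.", "%" and
# "½" are handled by this function; the _abbreviations table above is not used
# here and appears to be consumed by a separate cleaning step.
if __name__ == "__main__":
    print(normalize_abbreviations("No. 7 is 50% done"))
    # roughly: "Number 7 is 50 percent done"
    print(normalize_abbreviations("add 1½ cups"))
    # roughly: "add 1 and a half cups"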
| radtts-main | tts_text_processing/abbreviations.py |
""" adapted from https://github.com/keithito/tacotron """
import re
import numpy as np
from . import cleaners
from .cleaners import Cleaner
from .symbols import get_symbols
from .grapheme_dictionary import Grapheme2PhonemeDictionary
#########
# REGEX #
#########
# Regular expression matching text enclosed in curly braces for encoding
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
# Regular expression matching words and not words
_words_re = re.compile(r"([a-zA-ZÀ-ž]+['][a-zA-ZÀ-ž]+|[a-zA-ZÀ-ž]+)|([{][^}]+[}]|[^a-zA-ZÀ-ž{}]+)")
def lines_to_list(filename):
with open(filename, encoding='utf-8') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
class TextProcessing(object):
def __init__(self, symbol_set, cleaner_name, heteronyms_path,
phoneme_dict_path, p_phoneme, handle_phoneme,
handle_phoneme_ambiguous, prepend_space_to_text=False,
append_space_to_text=False, add_bos_eos_to_text=False,
encoding='latin-1'):
if heteronyms_path is not None and heteronyms_path != '':
self.heteronyms = set(lines_to_list(heteronyms_path))
else:
self.heteronyms = []
# phoneme dict
self.phonemedict = Grapheme2PhonemeDictionary(
phoneme_dict_path, encoding=encoding)
self.p_phoneme = p_phoneme
self.handle_phoneme = handle_phoneme
self.handle_phoneme_ambiguous = handle_phoneme_ambiguous
self.symbols = get_symbols(symbol_set)
self.cleaner_names = cleaner_name
self.cleaner = Cleaner(cleaner_name, self.phonemedict)
self.prepend_space_to_text = prepend_space_to_text
self.append_space_to_text = append_space_to_text
self.add_bos_eos_to_text = add_bos_eos_to_text
if add_bos_eos_to_text:
self.symbols.append('<bos>')
self.symbols.append('<eos>')
# Mappings from symbol to numeric ID and vice versa:
self.symbol_to_id = {s: i for i, s in enumerate(self.symbols)}
self.id_to_symbol = {i: s for i, s in enumerate(self.symbols)}
def text_to_sequence(self, text):
sequence = []
# Check for curly braces and treat their contents as phoneme:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += self.symbols_to_sequence(text)
break
sequence += self.symbols_to_sequence(m.group(1))
sequence += self.phoneme_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(self, sequence):
result = ''
for symbol_id in sequence:
if symbol_id in self.id_to_symbol:
s = self.id_to_symbol[symbol_id]
# Enclose phoneme back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def clean_text(self, text):
text = self.cleaner(text)
return text
def symbols_to_sequence(self, symbols):
return [self.symbol_to_id[s] for s in symbols if s in self.symbol_to_id]
def phoneme_to_sequence(self, text):
return self.symbols_to_sequence(['@' + s for s in text.split()])
def get_phoneme(self, word):
phoneme_suffix = ''
if word.lower() in self.heteronyms:
return word
if len(word) > 2 and word.endswith("'s"):
phoneme = self.phonemedict.lookup(word)
if phoneme is None:
phoneme = self.phonemedict.lookup(word[:-2])
phoneme_suffix = '' if phoneme is None else ' Z'
elif len(word) > 1 and word.endswith("s"):
phoneme = self.phonemedict.lookup(word)
if phoneme is None:
phoneme = self.phonemedict.lookup(word[:-1])
phoneme_suffix = '' if phoneme is None else ' Z'
else:
phoneme = self.phonemedict.lookup(word)
if phoneme is None:
return word
if len(phoneme) > 1:
if self.handle_phoneme_ambiguous == 'first':
phoneme = phoneme[0]
elif self.handle_phoneme_ambiguous == 'random':
phoneme = np.random.choice(phoneme)
elif self.handle_phoneme_ambiguous == 'ignore':
return word
else:
phoneme = phoneme[0]
phoneme = "{" + phoneme + phoneme_suffix + "}"
return phoneme
def encode_text(self, text, return_all=False):
text_clean = self.clean_text(text)
text = text_clean
text_phoneme = ''
if self.p_phoneme > 0:
text_phoneme = self.convert_to_phoneme(text)
text = text_phoneme
text_encoded = self.text_to_sequence(text)
if self.prepend_space_to_text:
text_encoded.insert(0, self.symbol_to_id[' '])
if self.append_space_to_text:
text_encoded.append(self.symbol_to_id[' '])
if self.add_bos_eos_to_text:
text_encoded.insert(0, self.symbol_to_id['<bos>'])
text_encoded.append(self.symbol_to_id['<eos>'])
if return_all:
return text_encoded, text_clean, text_phoneme
return text_encoded
def convert_to_phoneme(self, text):
if self.handle_phoneme == 'sentence':
if np.random.uniform() < self.p_phoneme:
words = _words_re.findall(text)
text_phoneme = [
self.get_phoneme(word[0])
if (word[0] != '') else re.sub(r'\s(\d)', r'\1', word[1].upper())
for word in words]
text_phoneme = ''.join(text_phoneme)
text = text_phoneme
elif self.handle_phoneme == 'word':
words = _words_re.findall(text)
text_phoneme = [
re.sub(r'\s(\d)', r'\1', word[1].upper()) if word[0] == '' else (
self.get_phoneme(word[0])
if np.random.uniform() < self.p_phoneme
else word[0])
for word in words]
text_phoneme = ''.join(text_phoneme)
text = text_phoneme
elif self.handle_phoneme != '':
raise Exception("{} handle_phoneme is not supported".format(
self.handle_phoneme))
return text
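# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example of building TextProcessing and encoding a sentence. The
# symbol set exists in symbols.py, but the cleaner name and the two resource
# paths below are placeholders and must match the assets used by the config.
def _example_usage():
    tp = TextProcessing(
        symbol_set='english_basic',
        cleaner_name=['english_cleaners'],     # placeholder cleaner spec
        heteronyms_path='heteronyms',          # placeholder path
        phoneme_dict_path='cmudict-0.7b',      # placeholder path
        p_phoneme=0.5,
        handle_phoneme='word',
        handle_phoneme_ambiguous='ignore')
    ids = tp.encode_text("Hello world.")
    return ids, tp.sequence_to_text(ids)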
| radtts-main | tts_text_processing/text_processing.py |
""" adapted from https://github.com/keithito/tacotron """
import inflect
import re
_magnitudes = ['trillion', 'billion', 'million', 'thousand', 'hundred', 'm', 'b', 't']
_magnitudes_key = {'m': 'million', 'b': 'billion', 't': 'trillion'}
_measurements = '(f|c|k|d|m)'
_measurements_key = {'f': 'fahrenheit',
'c': 'celsius',
'k': 'thousand',
'm': 'meters'}
_currency_key = {'$': 'dollar', '£': 'pound', '€': 'euro', '₩': 'won'}
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_currency_re = re.compile(r'([\$€£₩])([0-9\.\,]*[0-9]+)(?:[ ]?({})(?=[^a-zA-Z]))?'.format("|".join(_magnitudes)), re.IGNORECASE)
_measurement_re = re.compile(r'([0-9\.\,]*[0-9]+(\s)?{}\b)'.format(_measurements), re.IGNORECASE)
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
# _range_re = re.compile(r'(?<=[0-9])+(-)(?=[0-9])+.*?')
_roman_re = re.compile(r'\b(?=[MDCLXVI]+\b)M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{2,3})\b') # avoid I
_multiply_re = re.compile(r'(\b[0-9]+)(x)([0-9]+)')
_number_re = re.compile(r"[0-9]+'s|[0-9]+s|[0-9]+")
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_currency(m):
currency = _currency_key[m.group(1)]
quantity = m.group(2)
magnitude = m.group(3)
# remove commas from quantity to be able to convert to numerical
quantity = quantity.replace(',', '')
# check for million, billion, etc...
if magnitude is not None and magnitude.lower() in _magnitudes:
if len(magnitude) == 1:
magnitude = _magnitudes_key[magnitude.lower()]
return "{} {} {}".format(_expand_hundreds(quantity), magnitude, currency+'s')
parts = quantity.split('.')
if len(parts) > 2:
return quantity + " " + currency + "s" # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = currency if dollars == 1 else currency+'s'
cent_unit = 'cent' if cents == 1 else 'cents'
return "{} {}, {} {}".format(
_expand_hundreds(dollars), dollar_unit,
_inflect.number_to_words(cents), cent_unit)
elif dollars:
dollar_unit = currency if dollars == 1 else currency+'s'
return "{} {}".format(_expand_hundreds(dollars), dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return "{} {}".format(_inflect.number_to_words(cents), cent_unit)
else:
return 'zero' + ' ' + currency + 's'
def _expand_hundreds(text):
number = float(text)
    if 1000 < number < 10000 and (number % 100 == 0) and (number % 1000 != 0):
return _inflect.number_to_words(int(number / 100)) + " hundred"
else:
return _inflect.number_to_words(text)
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_measurement(m):
    _, number, measurement = re.split(r'(\d+(?:\.\d+)?)', m.group(0))
number = _inflect.number_to_words(number)
measurement = "".join(measurement.split())
measurement = _measurements_key[measurement.lower()]
return "{} {}".format(number, measurement)
def _expand_range(m):
return ' to '
def _expand_multiply(m):
left = m.group(1)
right = m.group(3)
return "{} by {}".format(left, right)
def _expand_roman(m):
# from https://stackoverflow.com/questions/19308177/converting-roman-numerals-to-integers-in-python
roman_numerals = {'I':1, 'V':5, 'X':10, 'L':50, 'C':100, 'D':500, 'M':1000}
result = 0
num = m.group(0)
for i, c in enumerate(num):
if (i+1) == len(num) or roman_numerals[c] >= roman_numerals[num[i+1]]:
result += roman_numerals[c]
else:
result -= roman_numerals[c]
return str(result)
def _expand_number(m):
_, number, suffix = re.split(r"(\d+(?:'?\d+)?)", m.group(0))
number = int(number)
if number > 1000 and number < 10000 and (number % 100 == 0) and (number % 1000 != 0):
text = _inflect.number_to_words(number // 100) + " hundred"
elif number > 1000 and number < 3000:
if number == 2000:
text = 'two thousand'
elif number > 2000 and number < 2010:
text = 'two thousand ' + _inflect.number_to_words(number % 100)
elif number % 100 == 0:
text = _inflect.number_to_words(number // 100) + ' hundred'
else:
number = _inflect.number_to_words(number, andword='', zero='oh', group=2).replace(', ', ' ')
number = re.sub(r'-', ' ', number)
text = number
else:
number = _inflect.number_to_words(number, andword='and')
number = re.sub(r'-', ' ', number)
number = re.sub(r',', '', number)
text = number
if suffix in ("'s", "s"):
if text[-1] == 'y':
text = text[:-1] + 'ies'
else:
text = text + suffix
return text
def normalize_currency(text):
return re.sub(_currency_re, _expand_currency, text)
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_currency_re, _expand_currency, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
# text = re.sub(_range_re, _expand_range, text)
# text = re.sub(_measurement_re, _expand_measurement, text)
text = re.sub(_roman_re, _expand_roman, text)
text = re.sub(_multiply_re, _expand_multiply, text)
text = re.sub(_number_re, _expand_number, text)
return text
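if __name__ == '__main__':
    # Editor's illustrative sketch (not part of the original module): a quick smoke
    # test of the normalization pipeline defined above; outputs depend on the
    # module-level regexes and inflect engine initialized earlier in this file.
    for example in ["I owe $1,250.50.",
                    "Chapter IV begins on page 2019.",
                    "The room is 12x10."]:
        print(example, '->', normalize_numbers(example))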
| radtts-main | tts_text_processing/numerical.py |
""" adapted from https://github.com/keithito/tacotron """
import re
_alt_re = re.compile(r'\([0-9]+\)')
class Grapheme2PhonemeDictionary:
"""Thin wrapper around g2p data."""
def __init__(self, file_or_path, keep_ambiguous=True, encoding='latin-1'):
with open(file_or_path, encoding=encoding) as f:
entries = _parse_g2p(f)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items()
if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
"""Returns list of pronunciations of the given word."""
return self._entries.get(word.upper())
def _parse_g2p(file):
g2p = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = parts[1].strip()
if word in g2p:
g2p[word].append(pronunciation)
else:
g2p[word] = [pronunciation]
return g2p
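# Editor's illustrative sketch (not part of the original module; the path below is a
# placeholder): load a pronunciation dictionary and query it.
#     g2p = Grapheme2PhonemeDictionary('my_g2p_dict.txt')
#     prons = g2p.lookup('hello')   # list of pronunciations, or None if the word is absent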
| radtts-main | tts_text_processing/grapheme_dictionary.py |
""" adapted from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text
that has been run through Unidecode. For other data, you can modify
_characters.'''
arpabet = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1',
'AH2', 'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0',
'AY1', 'AY2', 'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0',
'ER1', 'ER2', 'EY', 'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1',
'IH2', 'IY', 'IY0', 'IY1', 'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW',
'OW0', 'OW1', 'OW2', 'OY', 'OY0', 'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T',
'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW', 'UW0', 'UW1', 'UW2', 'V', 'W', 'Y',
'Z', 'ZH'
]
def get_symbols(symbol_set):
if symbol_set == 'english_basic':
_pad = '_'
_punctuation = '!\'"(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_arpabet = ["@" + s for s in arpabet]
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet
elif symbol_set == 'english_basic_lowercase':
_pad = '_'
_punctuation = '!\'"(),.:;? '
_special = '-'
_letters = 'abcdefghijklmnopqrstuvwxyz'
_arpabet = ["@" + s for s in arpabet]
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet
elif symbol_set == 'english_expanded':
_punctuation = '!\'",.:;? '
_math = '#%&*+-/[]()'
_special = '_@©°½—₩€$'
_accented = 'áçéêëñöøćž'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_arpabet = ["@" + s for s in arpabet]
symbols = list(_punctuation + _math + _special + _accented + _letters) + _arpabet
elif symbol_set == 'radtts':
_punctuation = '!\'",.:;? '
_math = '#%&*+-/[]()'
_special = '_@©°½—₩€$'
_accented = 'áçéêëñöøćž'
_numbers = '0123456789'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_arpabet = ["@" + s for s in arpabet]
symbols = list(_punctuation + _math + _special + _accented + _numbers + _letters) + _arpabet
else:
raise Exception("{} symbol set does not exist".format(symbol_set))
return symbols
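if __name__ == '__main__':
    # Editor's illustrative sketch (not part of the original module): consumers of
    # get_symbols() typically build the symbol-to-id table used for text-to-sequence
    # conversion.
    symbols = get_symbols('radtts')
    symbol_to_id = {s: i for i, s in enumerate(symbols)}
    print(len(symbols), 'symbols; id of "a" is', symbol_to_id['a'])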
| radtts-main | tts_text_processing/symbols.py |
""" adapted from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from string import punctuation
from functools import reduce
from unidecode import unidecode
from .numerical import normalize_numbers, normalize_currency
from .acronyms import AcronymNormalizer
from .datestime import normalize_datestime
from .letters_and_numbers import normalize_letters_and_numbers
from .abbreviations import normalize_abbreviations
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# Regular expression separating words enclosed in curly braces for cleaning
_arpa_re = re.compile(r'{[^}]+}|\S+')
def expand_abbreviations(text):
return normalize_abbreviations(text)
def expand_numbers(text):
return normalize_numbers(text)
def expand_currency(text):
return normalize_currency(text)
def expand_datestime(text):
return normalize_datestime(text)
def expand_letters_and_numbers(text):
return normalize_letters_and_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def separate_acronyms(text):
text = re.sub(r"([0-9]+)([a-zA-Z]+)", r"\1 \2", text)
text = re.sub(r"([a-zA-Z]+)([0-9]+)", r"\1 \2", text)
return text
def convert_to_ascii(text):
return unidecode(text)
def dehyphenize_compound_words(text):
text = re.sub(r'(?<=[a-zA-Z0-9])-(?=[a-zA-Z])', ' ', text)
return text
def remove_space_before_punctuation(text):
return re.sub(r"\s([{}](?:\s|$))".format(punctuation), r'\1', text)
class Cleaner(object):
def __init__(self, cleaner_names, phonemedict):
self.cleaner_names = cleaner_names
self.phonemedict = phonemedict
self.acronym_normalizer = AcronymNormalizer(self.phonemedict)
def __call__(self, text):
for cleaner_name in self.cleaner_names:
sequence_fns, word_fns = self.get_cleaner_fns(cleaner_name)
for fn in sequence_fns:
text = fn(text)
text = [reduce(lambda x, y: y(x), word_fns, split)
if split[0] != '{' else split
for split in _arpa_re.findall(text)]
text = ' '.join(text)
text = remove_space_before_punctuation(text)
return text
def get_cleaner_fns(self, cleaner_name):
if cleaner_name == 'basic_cleaners':
sequence_fns = [lowercase, collapse_whitespace]
word_fns = []
elif cleaner_name == 'english_cleaners':
sequence_fns = [collapse_whitespace, convert_to_ascii, lowercase]
word_fns = [expand_numbers, expand_abbreviations]
elif cleaner_name == 'radtts_cleaners':
sequence_fns = [collapse_whitespace, expand_currency,
expand_datestime, expand_letters_and_numbers]
word_fns = [expand_numbers, expand_abbreviations]
elif cleaner_name == 'transliteration_cleaners':
sequence_fns = [convert_to_ascii, lowercase, collapse_whitespace]
else:
raise Exception("{} cleaner not supported".format(cleaner_name))
return sequence_fns, word_fns
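if __name__ == '__main__':
    # Editor's illustrative sketch (not part of the original module): Cleaner only needs
    # an object exposing a CMUdict-style lookup() method (used by the acronym
    # normalizer), so a trivial stub suffices for a smoke test.
    class _EmptyPhonemeDict:
        def lookup(self, word):
            return None

    cleaner = Cleaner(['radtts_cleaners'], _EmptyPhonemeDict())
    print(cleaner("Dr. Smith paid $5 for 2 CDs at 10am."))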
| radtts-main | tts_text_processing/cleaners.py |
""" adapted from https://github.com/keithito/tacotron """
import re
_ampm_re = re.compile(
r'([0-9]|0[0-9]|1[0-9]|2[0-3]):?([0-5][0-9])?\s*([AaPp][Mm]\b)')
def _expand_ampm(m):
matches = list(m.groups(0))
txt = matches[0]
txt = txt if int(matches[1]) == 0 else txt + ' ' + matches[1]
if matches[2][0].lower() == 'a':
txt += ' a.m.'
elif matches[2][0].lower() == 'p':
txt += ' p.m.'
return txt
def normalize_datestime(text):
text = re.sub(_ampm_re, _expand_ampm, text)
#text = re.sub(r"([0-9]|0[0-9]|1[0-9]|2[0-3]):([0-5][0-9])?", r"\1 \2", text)
return text
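# Editor's illustrative note (not part of the original module): normalize_datestime only
# expands the am/pm suffix, e.g. "Doors open at 10:30pm" -> "Doors open at 10 30 p.m.";
# the remaining digits are left for downstream number normalization.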
| radtts-main | tts_text_processing/datestime.py |
import re
from .cmudict import CMUDict
_letter_to_arpabet = {
'A': 'EY1',
'B': 'B IY1',
'C': 'S IY1',
'D': 'D IY1',
'E': 'IY1',
'F': 'EH1 F',
'G': 'JH IY1',
'H': 'EY1 CH',
'I': 'AY1',
'J': 'JH EY1',
'K': 'K EY1',
'L': 'EH1 L',
'M': 'EH1 M',
'N': 'EH1 N',
'O': 'OW1',
'P': 'P IY1',
'Q': 'K Y UW1',
'R': 'AA1 R',
'S': 'EH1 S',
'T': 'T IY1',
'U': 'Y UW1',
'V': 'V IY1',
'X': 'EH1 K S',
'Y': 'W AY1',
'W': 'D AH1 B AH0 L Y UW0',
'Z': 'Z IY1',
's': 'Z'
}
# must ignore roman numerals
# _acronym_re = re.compile(r'([A-Z][A-Z]+)s?|([A-Z]\.([A-Z]\.)+s?)')
_acronym_re = re.compile(r'([A-Z][A-Z]+)s?')
class AcronymNormalizer(object):
def __init__(self, phoneme_dict):
self.phoneme_dict = phoneme_dict
def normalize_acronyms(self, text):
def _expand_acronyms(m, add_spaces=True):
acronym = m.group(0)
# remove dots if they exist
            acronym = re.sub(r'\.', '', acronym)
acronym = "".join(acronym.split())
arpabet = self.phoneme_dict.lookup(acronym)
if arpabet is None:
acronym = list(acronym)
arpabet = ["{" + _letter_to_arpabet[letter] + "}" for letter in acronym]
# temporary fix
if arpabet[-1] == '{Z}' and len(arpabet) > 1:
arpabet[-2] = arpabet[-2][:-1] + ' ' + arpabet[-1][1:]
del arpabet[-1]
arpabet = ' '.join(arpabet)
elif len(arpabet) == 1:
arpabet = "{" + arpabet[0] + "}"
else:
arpabet = acronym
return arpabet
text = re.sub(_acronym_re, _expand_acronyms, text)
return text
def __call__(self, text):
return self.normalize_acronyms(text)
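# Editor's illustrative note (not part of the original module): when the phoneme dict has
# no entry for an all-caps token, the acronym is spelled out letter by letter in ARPAbet,
# e.g. AcronymNormalizer(phoneme_dict)("Install the GPU driver") gives
# "Install the {JH IY1} {P IY1} {Y UW1} driver".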
| radtts-main | tts_text_processing/acronyms.py |
""" Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
This script is for dumping the gaussian landmark predictions,
provided the dataset and trained landmark model checkpoint.
This is necessary for evaluating the landmark quality (on BBC)
as well as for performing the video manipulation tasks.
"""
import torch
import argparse
import pickle
import os
import numpy as np
from torch.utils.data import DataLoader
from dataloaders.bbc_pose_dataset import BBCPoseLandmarkEvalDataset
from utils.utils import parse_all_args, load_weights, get_model
from copy import deepcopy
def setup_dataloaders(config):
# setup the dataset
num_workers = config['num_workers']
val_dataloader = None
# For both video manipulation and landmark evaluation (regression to annotated keypoints)
if config['dataset'] == 'bbc_pose':
train_dataset = BBCPoseLandmarkEvalDataset(config, 'train')
val_dataset = BBCPoseLandmarkEvalDataset(config, 'val')
test_dataset = BBCPoseLandmarkEvalDataset(config, 'test')
# validation set for model selection based on landmark evaluation.
val_dataloader = DataLoader(val_dataset, batch_size=config['batch_size'],
shuffle=False, num_workers=num_workers)
else:
print("unrecognized dataset!")
exit(1)
train_dataloader = DataLoader(train_dataset, batch_size=config['batch_size'],
shuffle=False, num_workers=num_workers)
test_dataloader = DataLoader(test_dataset, batch_size=config['batch_size'],
shuffle=False, num_workers=num_workers)
return train_dataloader, test_dataloader, val_dataloader
def convert_encoding(config, model, dataloader):
"""
iterate the data and extract the tensors we need
"""
all_preds = []
all_preds_cov = []
all_preds_cov_values = []
all_vid_idx = []
all_frame_idx = []
all_gt_kpts = []
all_bboxes = []
    print('number of batches: ', len(dataloader))
for cnt, curr_batch in enumerate(dataloader):
if cnt % 10 == 0:
print('cnt', cnt, 'total', len(dataloader))
# im will be b x c x 128 x 128
# gt_keypoints will be b x 10
# this avoids a shared memory problem when num_workers > 0 (hopefully)
curr_batch_cpy = deepcopy(curr_batch)
del curr_batch
curr_batch = curr_batch_cpy
vid_idx = deepcopy(curr_batch['vid_idx']).numpy()
frame_idx = curr_batch['img_idx'].numpy()
im = deepcopy(curr_batch['input_a'])
if config['dataset'] == 'bbc_pose':
all_gt_kpts.append(curr_batch['gt_kpts'].numpy())
all_bboxes.append(curr_batch['bbox'].numpy())
output_dict = model(im.cuda())
heatmap_centers = output_dict['vis_centers']
heatmap_centers_x = heatmap_centers[0].cpu()
heatmap_centers_y = heatmap_centers[1].cpu()
heatmap_cov = output_dict['vis_cov'].cpu()
heatmap_centers_cat = torch.cat((heatmap_centers_x, heatmap_centers_y), 1)
all_vid_idx.append(vid_idx)
all_frame_idx.append(frame_idx)
all_preds.append(heatmap_centers_cat.cpu().detach().numpy().astype('float16'))
# if cov is fitted, save original and after decomposing
if not config['use_identity_covariance']:
cov_chol = torch.cholesky(heatmap_cov)
all_preds_cov_values.append(cov_chol.cpu().detach().numpy().astype('float16'))
all_preds_cov.append(heatmap_cov.detach().numpy().astype('float16'))
all_preds_cat = np.concatenate(all_preds, 0)
all_vid_idx = np.concatenate(all_vid_idx, 0)
all_frame_idx = np.concatenate(all_frame_idx, 0)
# currently only bbc has GT keypoints for evaluation
if config['dataset'] == 'bbc_pose':
all_bboxes = np.concatenate(all_bboxes, 0)
all_gt_kpts = np.concatenate(all_gt_kpts, 0)
if not config['use_identity_covariance']:
all_preds_cov_values = np.concatenate(all_preds_cov_values, 0)
all_preds_cov = np.concatenate(all_preds_cov, 0)
return all_preds_cat, all_preds_cov, all_preds_cov_values, all_vid_idx, all_frame_idx, all_bboxes, all_gt_kpts
def save_files(x, x_cov, x_cov_values, vid, frame, bboxes, gt, out_dir):
results = {}
if not os.path.exists(out_dir):
os.makedirs(out_dir)
outname = os.path.join(out_dir, 'gaussians.pkl3')
results['predictions_mean'] = x
results['predictions_cov'] = x_cov
results['predictions_cov_decomp'] = x_cov_values
results['vid'] = vid
results['frame'] = frame
results['bboxes'] = bboxes
results['gt'] = gt
with open(outname, 'wb') as handle:
pickle.dump(results, handle, protocol=3)
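# Editor's illustrative sketch (not part of the original script): reading one of the
# dumped pickles back for downstream evaluation (the path is a placeholder):
#     import pickle
#     with open('gaussians_out/test/gaussians.pkl3', 'rb') as f:
#         results = pickle.load(f)
#     means = results['predictions_mean']          # (N, 2K) float16, x coords then y coords
#     vid, frame = results['vid'], results['frame']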
def eval_encoding(config, model, train_dataloader, test_dataloader, val_dataloader):
preds_cat, preds_cov, preds_cov_values, vid_idx, frame_idx, bboxes, gt = convert_encoding(config, model, test_dataloader)
# multiply test ground truth keypoints by 0 to avoid any potential leakage of test annotations
save_files(preds_cat, preds_cov, preds_cov_values, vid_idx, frame_idx, bboxes, 0*gt, config['gaussians_save_path'] + '/test')
preds_cat, preds_cov, preds_cov_values, vid_idx, frame_idx, bboxes, gt = convert_encoding(config, model, train_dataloader)
save_files(preds_cat, preds_cov, preds_cov_values, vid_idx, frame_idx, bboxes, gt, config['gaussians_save_path'] + '/train')
if val_dataloader is not None:
# multiply val ground truth keypoints by 0 to avoid any potential leakage of validation set annotations
preds_cat, preds_cov, preds_cov_values, vid_idx, frame_idx, bboxes, gt = convert_encoding(config, model, val_dataloader)
save_files(preds_cat, preds_cov, preds_cov_values, vid_idx, frame_idx, bboxes, 0*gt, config['gaussians_save_path'] + '/val')
def main(config):
print(config)
# initialize model
model = get_model(config)
# load weights from checkpoint
state_dict = load_weights(config['resume_ckpt'])
model.load_state_dict(state_dict)
model.cuda()
model.eval()
train_dataloader, test_dataloader, val_dataloader = setup_dataloaders(config)
eval_encoding(config, model, train_dataloader, test_dataloader, val_dataloader)
if __name__ == '__main__':
# load yaml
parser = argparse.ArgumentParser(description='')
parser.add_argument('--config', type=str)
parser.add_argument('--gaussians_save_path', type=str)
config, args = parse_all_args(parser, 'configs/defaults.yaml', return_args=True)
config['gaussians_save_path'] = args.gaussians_save_path
config['no_verbose'] = True
main(config)
| UnsupervisedLandmarkLearning-master | dump_preds.py |
"""Main training script. Currently only supports the BBCPose dataset
"""
from apex.parallel import DistributedDataParallel as DDP
from utils.visualizer import dump_image, project_heatmaps_colorized
from models.losses import Vgg19PerceptualLoss, GANLoss
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from dataloaders.bbc_pose_dataset import BBCPoseDataset
from torch import optim
from models.discriminator import MultiscaleDiscriminator
from utils.utils import initialize_distributed, parse_all_args,\
get_learning_rate, log_iter, save_options, reduce_tensor, \
get_model, load_weights, save_weights
import torch
import numpy as np
import torch.nn.functional
import torch.nn as nn
import os
import argparse
torch.backends.cudnn.benchmark = True
def init_weights(m):
if type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight)
if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.1)
def setup_dataloaders(config):
"""Setup dataloaders for respective datasets
Args:
config (dict): dictionary of runtime configuration options
Returns:
train_dataloader (torch.utils.data.Dataloader): Dataloader for training split.
val_dataloader (torch.utils.data.Dataloader): Dataloader for validation split.
train_sampler (torch.utils.data.distributed.DistributedSampler): DDP sampler if using DDP.
"""
# setup the dataset
if config['dataset'] == 'bbc_pose':
train_dataset = BBCPoseDataset(config, 'train')
val_dataset = BBCPoseDataset(config, 'validation')
else:
print("No such dataset!")
exit(-1)
# distributed sampler if world size > 1
train_sampler = None
val_sampler = None
if config['use_DDP']:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
# wrap the datasets in a dataloader
train_dataloader = DataLoader(train_dataset,
batch_size=config['batch_size'],
shuffle=(train_sampler is None),
num_workers=config['num_workers'],
pin_memory=True,
sampler=train_sampler)
val_dataloader = DataLoader(val_dataset,
batch_size=config['batch_size'],
shuffle=False,
num_workers=config['num_workers'],
pin_memory=True,
sampler=val_sampler)
return train_dataloader, val_dataloader, train_sampler
def run_val(model, criterion, val_dataloader, vis_path=None):
"""Validation pass
Runs in no_grad with model in eval mode
sets model back to train() at the end
"""
model.eval()
num_batches = len(val_dataloader)
mean_loss = 0
cnt = 0
with torch.no_grad():
for curr_batch in val_dataloader:
print("Val iter %d / %d" % (cnt, len(val_dataloader)))
input_a = curr_batch['input_a'].cuda()
input_b = curr_batch['input_b'].cuda()
target = curr_batch['target'].cuda()
imnames = curr_batch['imname']
output_dict = model(input_a, input_b)
output_recon = output_dict['reconstruction']
loss = criterion(output_dict['reconstruction'], target)
mean_loss = mean_loss + loss/num_batches
if vis_path is not None and cnt == 0:
try:
os.mkdir(vis_path)
except OSError:
print('Folder exists')
# dump validation images into vis_path folder
B, C, H, W = input_a.shape
visualization_centers = output_dict['vis_centers']
for b in range(B):
imname, _ = imnames[b].split('.')
_ = dump_image(target[b].cpu(), None, os.path.join(vis_path, imname+'.png'))
_ = dump_image(output_recon[b].cpu(), None, os.path.join(vis_path, imname+'o.png'))
cnt = cnt + 1
model.train()
return mean_loss
def run_visualization(output_dict, output_recon, target, input_a, input_b, out_path, tb_logger, step, warped_heatmap=None):
"""Function for preparing visualizations in the tensorboard log
"""
visualization_centers = output_dict['vis_centers']
x = visualization_centers[0]
y = visualization_centers[1]
x_b, y_b = output_dict['input_b_gauss_params'][0], output_dict['input_b_gauss_params'][1]
vis1img = dump_image(target[0].cpu(), (x[0], y[0]), os.path.join(out_path, 'vis1.png'))
vis1oimg = dump_image(output_recon[0], (x[0], y[0]), os.path.join(out_path, 'vis1o.png'))
if 'background_recon' in output_dict.keys():
vis1baimg = dump_image(output_dict['background_recon'][0], None, os.path.join(out_path, 'vis1ba.png'))
vis1dimg = dump_image(output_dict['decoded_foreground'][0], None, os.path.join(out_path, 'vis1d.png'))
target_imgs = np.concatenate((vis1img, vis1oimg, vis1baimg, vis1dimg), axis=1)
tb_logger.add_image("target_reconstruction_background_foreground", target_imgs, global_step=step, dataformats='HWC')
mask_imgs = torch.cat((output_dict['input_a_fg_mask'][0], output_dict['input_b_fg_mask'][0]), dim=2)
tb_logger.add_image("inputamask_inputbmask", mask_imgs, global_step=step, dataformats='CHW')
else:
target_and_recon = np.concatenate((vis1img, vis1oimg), axis=1)
tb_logger.add_image("target_reconstruction", target_and_recon, global_step=step, dataformats='HWC')
vis1aimg = dump_image(input_a[0].cpu(), (x[0], y[0]), os.path.join(out_path, 'vis1a.png'))
vis1bimg = dump_image(input_b[0].cpu(), (x_b[0], y_b[0]), os.path.join(out_path, 'vis1b.png'))
input_imgs = np.concatenate((vis1aimg, vis1bimg), axis=1)
tb_logger.add_image("input_a_b", input_imgs, global_step=step, dataformats='HWC')
if 'weighted_center_prediction' in output_dict.keys():
object_center = output_dict['weighted_center_prediction'][0]
object_center_x = object_center[0]
object_center_y = object_center[1]
predicted_centers = output_dict['fg_predicted_centers']
vis1imgcenter = plot_offsets(target[0], predicted_centers[0], os.path.join(out_path, 'vis1c.png'))
tb_logger.add_image("predicted_center", vis1imgcenter, global_step = step, dataformats='HWC')
heat_maps = output_dict['input_a_heatmaps'][0].data.cpu().numpy()
heat_maps_out = project_heatmaps_colorized(heat_maps)
tb_logger.add_image("raw_heatmap", heat_maps_out.astype(np.uint8), global_step=step, dataformats='CHW')
if warped_heatmap is not None:
warped_heatmap = warped_heatmap.data.cpu().numpy()
warped_heatmaps_out = project_heatmaps_colorized(warped_heatmap)
        tb_logger.add_image("warped_raw_heatmap", warped_heatmaps_out.astype(np.uint8), global_step=step, dataformats='CHW')
def apply_GAN_criterion(output_recon, target, predicted_keypoints,
discriminator, criterionGAN):
"""Sub-routine for applying adversarial loss within the main train loop
    Adapted from https://github.com/NVIDIA/pix2pixHD/blob/master/models/pix2pixHD_model.py, which in turn was adapted from
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/pix2pix_model.py
Args:
output_recon (torch.tensor): reconstruction from decoder.
target (torch.tensor): reference image.
predicted_keypoints (torch.tensor): predicted gauss maps.
discriminator (torch.nn.Module): discriminator model.
criterionGAN (torch.nn.Module): decoder criterion.
Returns:
Loss values for the generator and discriminator
"""
pred_fake_D = discriminator(output_recon.detach(), predicted_keypoints)
loss_D_fake = criterionGAN(pred_fake_D, False)
pred_real = discriminator(target.detach(), predicted_keypoints)
loss_D_real = criterionGAN(pred_real, True)
pred_fake = discriminator(output_recon, predicted_keypoints)
loss_G_GAN = criterionGAN(pred_fake, True)
return loss_G_GAN, loss_D_real, loss_D_fake
def main(config):
save_path = config['save_path']
epochs = config['epochs']
os.environ['TORCH_HOME'] = config['torch_home']
distributed = config['use_DDP']
start_ep = 0
start_cnt = 0
# initialize model
print("Initializing model...")
if distributed:
initialize_distributed(config)
rank = config['rank']
# map string name to class constructor
model = get_model(config)
model.apply(init_weights)
if config['resume_ckpt'] is not None:
# load weights from checkpoint
state_dict = load_weights(config['resume_ckpt'])
model.load_state_dict(state_dict)
print("Moving model to GPU")
model.cuda(torch.cuda.current_device())
print("Setting up losses")
if config['use_vgg']:
criterionVGG = Vgg19PerceptualLoss(config['reduced_w'])
criterionVGG.cuda()
validationLoss = criterionVGG
if config['use_gan']:
use_sigmoid = config['no_lsgan']
disc_input_channels = 3
discriminator = MultiscaleDiscriminator(disc_input_channels, config['ndf'], config['n_layers_D'], 'instance',
use_sigmoid, config['num_D'], False, False)
discriminator.apply(init_weights)
if config['resume_ckpt_D'] is not None:
# load weights from checkpoint
print("Resuming discriminator from %s" %(config['resume_ckpt_D']))
state_dict = load_weights(config['resume_ckpt_D'])
discriminator.load_state_dict(state_dict)
discriminator.cuda(torch.cuda.current_device())
criterionGAN = GANLoss(use_lsgan=not config['no_lsgan'])
criterionGAN.cuda()
criterionFeat = nn.L1Loss().cuda()
# initialize dataloader
print("Setting up dataloaders...")
train_dataloader, val_dataloader, train_sampler = setup_dataloaders(config)
print("Done!")
# run the training loop
print("Initializing optimizers...")
optimizer_G = optim.Adam(model.parameters(), lr=config['learning_rate'], weight_decay=config['weight_decay'])
if config['resume_ckpt_opt_G'] is not None:
optimizer_G_state_dict = torch.load(config['resume_ckpt_opt_G'], map_location=lambda storage, loc: storage)
optimizer_G.load_state_dict(optimizer_G_state_dict)
if config['use_gan']:
optimizer_D = optim.Adam(discriminator.parameters(), lr=config['learning_rate'])
if config['resume_ckpt_opt_D'] is not None:
optimizer_D_state_dict = torch.load(config['resume_ckpt_opt_D'], map_location=lambda storage, loc: storage)
optimizer_D.load_state_dict(optimizer_D_state_dict)
print("Done!")
if distributed:
print("Moving model to DDP...")
model = DDP(model)
if config['use_gan']:
discriminator = DDP(discriminator, delay_allreduce=True)
print("Done!")
tb_logger = None
if rank == 0:
tb_logdir = os.path.join(save_path, 'tbdir')
if not os.path.exists(tb_logdir):
os.makedirs(tb_logdir)
tb_logger = SummaryWriter(tb_logdir)
# run training
if not os.path.exists(save_path):
os.makedirs(save_path)
log_name = os.path.join(save_path, 'loss_log.txt')
opt_name = os.path.join(save_path, 'opt.yaml')
print(config)
save_options(opt_name, config)
log_handle = open(log_name, 'a')
print("Starting training")
cnt = start_cnt
assert(config['use_warped'] or config['use_temporal'])
for ep in range(start_ep, epochs):
if train_sampler is not None:
train_sampler.set_epoch(ep)
for curr_batch in train_dataloader:
optimizer_G.zero_grad()
input_a = curr_batch['input_a'].cuda()
target = curr_batch['target'].cuda()
if config['use_warped'] and config['use_temporal']:
input_a = torch.cat((input_a, input_a), 0)
input_b = torch.cat((curr_batch['input_b'].cuda(), curr_batch['input_temporal'].cuda()), 0)
target = torch.cat((target, target), 0)
elif config['use_temporal']:
input_b = curr_batch['input_temporal'].cuda()
elif config['use_warped']:
input_b = curr_batch['input_b'].cuda()
output_dict = model(input_a, input_b)
output_recon = output_dict['reconstruction']
loss_vgg = loss_G_GAN = loss_G_feat = 0
if config['use_vgg']:
loss_vgg = criterionVGG(output_recon, target) * config['vgg_lambda']
if config['use_gan']:
predicted_landmarks = output_dict['input_a_gauss_maps']
# output_dict['reconstruction'] can be considered normalized
loss_G_GAN, loss_D_real, loss_D_fake = apply_GAN_criterion(output_recon, target, predicted_landmarks.detach(),
discriminator, criterionGAN)
loss_D = (loss_D_fake + loss_D_real) * 0.5
loss_G = loss_G_GAN + loss_G_feat + loss_vgg
loss_G.backward()
# grad_norm clipping
if not config['no_grad_clip']:
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer_G.step()
if config['use_gan']:
optimizer_D.zero_grad()
loss_D.backward()
# grad_norm clipping
if not config['no_grad_clip']:
torch.nn.utils.clip_grad_norm_(discriminator.parameters(), 1.0)
optimizer_D.step()
if distributed:
if config['use_vgg']:
loss_vgg = reduce_tensor(loss_vgg, config['world_size'])
if rank == 0:
if cnt % 10 == 0:
run_visualization(output_dict, output_recon, target, input_a, input_b, save_path, tb_logger, cnt)
print_dict = {"learning_rate": get_learning_rate(optimizer_G)}
if config['use_vgg']:
tb_logger.add_scalar('vgg.loss', loss_vgg, cnt)
print_dict['Loss_VGG'] = loss_vgg.data
if config['use_gan']:
tb_logger.add_scalar('gan.loss', loss_G_GAN, cnt)
tb_logger.add_scalar('d_real.loss', loss_D_real, cnt)
tb_logger.add_scalar('d_fake.loss', loss_D_fake, cnt)
print_dict['Loss_G_GAN'] = loss_G_GAN
print_dict['Loss_real'] = loss_D_real.data
print_dict['Loss_fake'] = loss_D_fake.data
log_iter(ep, cnt % len(train_dataloader), len(train_dataloader), print_dict, log_handle=log_handle)
if loss_G != loss_G:
print("NaN!!")
exit(-2)
cnt = cnt+1
# end of train iter loop
if ep % config['val_freq'] == 0 and config['val_freq'] > 0:
val_loss = run_val(model, validationLoss, val_dataloader, os.path.join(save_path, 'val_%d_renders' % (ep)))
if distributed:
val_loss = reduce_tensor(val_loss, config['world_size'])
if rank == 0:
tb_logger.add_scalar('validation.loss', val_loss, ep)
log_iter(ep, 1, 1, {"Loss_VGG": val_loss}, header="Validation loss: ", log_handle=log_handle)
if rank == 0:
if (ep % config['save_freq'] == 0):
fname = 'checkpoint_%d.ckpt' % (ep)
fname = os.path.join(save_path, fname)
print("Saving model...")
save_weights(model, fname, distributed)
optimizer_g_fname = os.path.join(save_path, 'latest_optimizer_g_state.ckpt')
torch.save(optimizer_G.state_dict(), optimizer_g_fname)
if config['use_gan']:
fname = 'checkpoint_D_%d.ckpt' % (ep)
fname = os.path.join(save_path, fname)
save_weights(discriminator, fname, distributed)
optimizer_d_fname = os.path.join(save_path, 'latest_optimizer_d_state.ckpt')
torch.save(optimizer_D.state_dict(), optimizer_d_fname)
# end of epoch loop
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--config', type=str, help="Path to config file for current experiment")
# defaults.yaml stores list of all options with their default values
# do not edit that file unless you're adding additional options or wish to change defaults.
config = parse_all_args(parser, 'configs/defaults.yaml')
main(config)
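# Editor's illustrative note (not part of the original script): a typical single-GPU
# launch looks like (the config path is a placeholder)
#     python train.py --config configs/bbc_pose.yaml
# For multi-GPU training, the config must enable use_DDP and the launcher (e.g. torchrun)
# must export WORLD_SIZE, RANK and LOCAL_RANK, which parse_all_args() reads from the
# environment.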
| UnsupervisedLandmarkLearning-master | train.py |
"""Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Dataset classes for handling the BBCPose data
"""
from torch.utils.data import Dataset
import torch
import os
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
import scipy.io as sio
from .base_datasets import BaseVideoDataset
class BBCPoseDataset(BaseVideoDataset):
def __init__(self, args, partition):
super(BBCPoseDataset, self).__init__(args, partition)
def setup_frame_array(self, args, partition):
if partition == 'train':
self.input_vids = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
elif partition == 'validation':
self.input_vids = ['11', '12', '13', '14', '15']
# load the annotations file
self.annos = sio.loadmat(os.path.join(self.dataset_path, 'code', 'bbcpose.mat'))['bbcpose'][0]
# first bin is 0
self.num_frames_array = [0]
frac = 1
        if partition == 'validation':
frac = args['val_frac']
for folder in self.input_vids:
curr_vid_anno = self.annos[int(folder)-1]
# truncate validation if frac is specified
self.num_frames_array.append(int(curr_vid_anno[3].shape[1]*frac))
self.num_frames_array = np.array(self.num_frames_array).cumsum()
return self.num_frames_array
def process_batch(self, vid_idx, img_idx):
vid_path = os.path.join(self.dataset_path, self.input_vids[vid_idx])
curr_vid_anno = self.annos[int(self.input_vids[vid_idx])-1]
num_frames = curr_vid_anno[3].shape[1]
img_idx2_offset = self.sample_temporal(num_frames, img_idx, 3, 40)
gt_kpts = curr_vid_anno[4][:, :, img_idx].copy()
img_1 = os.path.join(vid_path, str(int(curr_vid_anno[3][0][img_idx])) + '.jpg')
img_2 = os.path.join(vid_path, str(int(curr_vid_anno[3][0][img_idx + img_idx2_offset])) + '.jpg')
bbox_x_min = gt_kpts[0].min() - 60
bbox_x_max = gt_kpts[0].max() + 60
bbox_y_min = gt_kpts[1].min() - 60
bbox_y_max = gt_kpts[1].max() + 60
# clip the bounding boxes
img_a = Image.open(img_1).convert('RGB')
wh = img_a.size
bbox_x_min = max(0, bbox_x_min)
bbox_y_min = max(0, bbox_y_min)
bbox_x_max = min(wh[0], bbox_x_max)
bbox_y_max = min(wh[1], bbox_y_max)
bbox_a = (bbox_x_min, bbox_y_min, bbox_x_max, bbox_y_max)
img_a = img_a.crop(bbox_a)
img_temporal = Image.open(img_2).convert('RGB')
img_temporal = img_temporal.crop(bbox_a)
# randomly flip
if np.random.rand() <= self.flip_probability:
# flip both images
img_a = transforms.functional.hflip(img_a)
img_temporal = transforms.functional.hflip(img_temporal)
bbox_w = bbox_x_max - bbox_x_min
bbox_h = bbox_y_max - bbox_y_min
img_temporal = self.to_tensor(self.resize(img_temporal))
img_temporal = self.normalize(img_temporal)
        img_a_color_jittered, img_a_warped, img_a_warped_offsets, target = self.construct_color_warp_pair(img_a)
# 2x7 array of x and y
gt_kpts_normalized = gt_kpts.copy()
gt_kpts_normalized[0] = ((gt_kpts_normalized[0] - bbox_x_min) / bbox_w - 0.5) * 2
gt_kpts_normalized[1] = ((gt_kpts_normalized[1] - bbox_y_min) / bbox_h - 0.5) * 2
return {'input_a': img_a_color_jittered, 'input_b': img_a_warped,
'input_temporal': img_temporal, 'target': target,
'imname': self.input_vids[vid_idx] + '_' + str(img_idx) + '.jpg',
'warping_tps_params': img_a_warped_offsets, 'gt_kpts': gt_kpts_normalized}
class BBCPoseLandmarkEvalDataset(Dataset):
def __init__(self, args, partition='train'):
super(BBCPoseLandmarkEvalDataset, self).__init__()
self.partition = partition
self.dataset_path = args['dataset_path']
self.img_size = args['img_size']
# if test set, load from the .mat annotation file
if self.partition == 'test':
self.annos = sio.loadmat(os.path.join(self.dataset_path, 'code', 'results.mat'))['results'][0][0]
self.gt = self.annos[0]
self.frame_name = self.annos[1][0]
self.video_name = self.annos[2][0]
self.img_len = len(self.video_name)
else:
self.annos = sio.loadmat(os.path.join(self.dataset_path, 'code', 'bbcpose.mat'))['bbcpose'][0]
self.num_frames_array = [0]
if partition == 'train_heldout':
# some frames from here are unannotated and therefore unused during training
# useful for testing performance on novel frames of the training subjects
self.input_vids = ['4', '6', '8', '10']
for folder in self.input_vids:
self.num_frames_array.append(20)
else:
if partition == 'train':
self.input_vids = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
elif partition == 'val':
self.input_vids = ['11', '12', '13', '14', '15']
for folder in self.input_vids:
curr_vid_anno = self.annos[int(folder)-1]
self.num_frames_array.append(curr_vid_anno[3].shape[1])
self.num_frames_array = np.array(self.num_frames_array).cumsum()
self.img_len = self.num_frames_array[-1]
self.orig_transforms = transforms.Compose([
transforms.Resize([self.img_size, self.img_size]),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))])
def __len__(self):
return self.img_len
def get_frame_index(self, global_idx):
vid_idx = np.searchsorted(self.num_frames_array, global_idx, side='right')-1
frame_idx = global_idx - self.num_frames_array[vid_idx]
return vid_idx, frame_idx
def __getitem__(self, idx):
if self.partition == 'test':
vid_idx, img_idx = self.video_name[idx], self.frame_name[idx]
img_1 = os.path.join(self.dataset_path, str(vid_idx), str(img_idx) + '.jpg')
gt_kpts = self.gt[:, :, idx]
elif self.partition == 'train_heldout':
vid_idx, img_idx = self.get_frame_index(idx)
print('vid_idx', vid_idx, 'img_idx', img_idx)
vid_path = os.path.join(self.dataset_path, self.input_vids[vid_idx])
curr_vid_anno = self.annos[int(self.input_vids[vid_idx])-1]
gt_kpts = curr_vid_anno[4][:, :, 0] # take the first box
img_1 = os.path.join(vid_path, str(img_idx+1) + '.jpg')
else:
vid_idx, img_idx = self.get_frame_index(idx)
vid_path = os.path.join(self.dataset_path, self.input_vids[vid_idx])
curr_vid_anno = self.annos[int(self.input_vids[vid_idx])-1]
gt_kpts = curr_vid_anno[4][:, :, img_idx]
img_1 = os.path.join(vid_path, str(int(curr_vid_anno[3][0][img_idx])) + '.jpg')
with Image.open(img_1) as img:
img = img.convert('RGB')
wh = img.size # width, height of image
box_x_1 = gt_kpts[0].min()
box_x_2 = gt_kpts[0].max()
box_y_1 = gt_kpts[1].min()
box_y_2 = gt_kpts[1].max()
box_x_center = (box_x_1 + box_x_2)/2
box_y_center = (box_y_1 + box_y_2)/2
bbox_x_min = max(0, box_x_center - 150)
bbox_x_max = min(wh[0], box_x_center + 150)
bbox_y_min = max(0, box_y_center - 150)
bbox_y_max = min(wh[1], box_y_center + 150)
target_keypoints = gt_kpts.copy()
bbox = (bbox_x_min, bbox_y_min, bbox_x_max, bbox_y_max)
img = img.crop(bbox)
bbox_w = bbox_x_max - bbox_x_min
bbox_h = bbox_y_max - bbox_y_min
# center coordinate space
target_keypoints[0] = (target_keypoints[0] - bbox_x_min - bbox_w/2) / bbox_w
target_keypoints[1] = (target_keypoints[1] - bbox_y_min - bbox_h/2) / bbox_h
target_keypoints = torch.flatten(torch.FloatTensor(target_keypoints))
img = self.orig_transforms(img)
return {'input_a': img, 'gt_kpts': target_keypoints, 'vid_idx': vid_idx, 'img_idx': img_idx, 'bbox': np.array(bbox)}
| UnsupervisedLandmarkLearning-master | dataloaders/bbc_pose_dataset.py |
"""
Custom transformation functions for image augmentation
"""
import random
import numpy as np
from numpy.random import random_sample
import cv2 # for TPS
import torch
import torchvision.transforms as transforms_t
import torchvision.transforms.functional as F
class TPSWarp(object):
"""
TPS param for non-linear warping:
nonlinear_pert_range: [-2, 2] (random perturbation of x and y by +/- 2 pixels
TPS params for affine transformation
    defaults: rotation +/- pi/8
    scales between 1.05 and 1.15 factor
    translates between +/-10 pixels
"""
def __init__(self, image_size, margin, num_vertical_points, num_horizontal_points,
nonlinear_pert_range=[-2, 2],
rot_range=[-np.pi/8, np.pi/8],
scale_range=[1.05, 1.15],
trans_range=[-10, 10], append_offset_channels=False):
self.nonlinear_pert_range = nonlinear_pert_range
self.rot_range = rot_range
self.scale_range = scale_range
self.trans_range = trans_range
self.num_points = num_horizontal_points*num_vertical_points
self.append_offset_channels = append_offset_channels
horizontal_points = np.linspace(margin, image_size[0] - margin, num_horizontal_points)
vertical_points = np.linspace(margin, image_size[1] - margin, num_vertical_points)
xv, yv = np.meshgrid(horizontal_points, vertical_points, indexing='xy')
xv = xv.reshape(1, -1, 1)
yv = yv.reshape(1, -1, 1)
self.grid = np.concatenate((xv, yv), axis=2)
self.matches = list()
        # TPS defines the alignment between source and target grid points
# here, we just assume nth source keypoint aligns to nth target keypoint
for i in range(self.num_points):
self.matches.append(cv2.DMatch(i, i, 0))
def sample_warp(self):
"""samples the warping matrix based on initialized parameters
"""
# will be on the right side of the multiply, e.g ([x,y] * w
rot = random_sample() * (self.rot_range[1] - self.rot_range[0]) + self.rot_range[0]
sc_x = random_sample() * (self.scale_range[1] - self.scale_range[0]) + self.scale_range[0]
sc_y = random_sample() * (self.scale_range[1] - self.scale_range[0]) + self.scale_range[0]
t_x = random_sample() * (self.trans_range[1] - self.trans_range[0]) + self.trans_range[0]
t_y = random_sample() * (self.trans_range[1] - self.trans_range[0]) + self.trans_range[0]
# return a transposed matrix
rotscale = [[ sc_x*np.cos(rot), -np.sin(rot)],
[ np.sin(rot), sc_y*np.cos(rot)]]
return rotscale, t_x, t_y
def random_perturb(self):
"""Returns a matrix for individually perturbing each grid point
"""
perturb_mat = random_sample(self.grid.shape) * (self.nonlinear_pert_range[1]
- self.nonlinear_pert_range[0]) + self.nonlinear_pert_range[0]
return perturb_mat
def __call__(self, img, tps=None):
"""
accepts a PIL image
must convert to numpy array to apply TPS
converts back to PIL image before returning
"""
# construct the transformed grid from the regular grid
img_as_arr = np.transpose(img.numpy(), (1, 2, 0))
if tps is None:
warp_matrix, t_x, t_y = self.sample_warp()
perturb_mat = self.random_perturb()
center = np.array([[[self.grid[:, :, 0].max()/2.0 + t_x, self.grid[:, :, 1].max()/2.0 + t_y]]])
target_grid = np.matmul((self.grid - center), warp_matrix) + perturb_mat + center
tps = cv2.createThinPlateSplineShapeTransformer()
tps.estimateTransformation(self.grid, target_grid, self.matches)
img_as_arr = tps.warpImage(img_as_arr, borderMode=cv2.BORDER_REPLICATE)
dims = img_as_arr.shape
if self.append_offset_channels: # extract ground truth warping offsets
full_grid_x, full_grid_y = np.meshgrid(np.arange(dims[1]), np.arange(dims[0]))
dims_half_x = dims[1]/2.0
dims_half_y = dims[0]/2.0
full_grid_x = (full_grid_x - dims_half_x)/dims_half_x
full_grid_y = (full_grid_y - dims_half_y)/dims_half_y
full_grid = np.concatenate((np.expand_dims(full_grid_x, 2), np.expand_dims(full_grid_y, 2)), axis=2)
img_coord_arr = tps.warpImage(full_grid.astype(np.float32), borderValue=-1024)
displacement = img_coord_arr
img_as_arr = np.concatenate((img_as_arr, displacement), 2)
# convert back to PIL and return
out_img = torch.from_numpy(img_as_arr).permute(2, 0, 1)
return out_img
class PairedColorJitter(object):
"""
Based on the source for torchvision.transforms.ColorJitter
https://pytorch.org/docs/stable/_modules/torchvision/transforms/transforms.html#ColorJitter
Modified to apply the same color jitter transformation for a pair of input images
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
clip_first_on_zero=False)
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0)
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
def __call__(self, img1, img2):
transforms = []
brightness_factor = random.uniform(self.brightness[0], self.brightness[1])
transforms.append(transforms_t.Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
contrast_factor = random.uniform(self.contrast[0], self.contrast[1])
transforms.append(transforms_t.Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
saturation_factor = random.uniform(self.saturation[0], self.saturation[1])
transforms.append(transforms_t.Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
hue_factor = random.uniform(self.hue[0], self.hue[1])
transforms.append(transforms_t.Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = transforms_t.Compose(transforms)
return transform(img1), transform(img2)
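if __name__ == '__main__':
    # Editor's illustrative sketch (not part of the original module): apply a random
    # TPS warp to a dummy 3x128x128 tensor; with append_offset_channels=True the
    # output carries two extra channels holding the normalized source coordinates
    # of each warped pixel (the ground-truth warping flow used as supervision).
    warp = TPSWarp([128, 128], margin=10, num_vertical_points=10,
                   num_horizontal_points=10, append_offset_channels=True)
    warped = warp(torch.rand(3, 128, 128))
    print(warped.shape)  # expected: torch.Size([5, 128, 128])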
| UnsupervisedLandmarkLearning-master | dataloaders/transforms.py |
""" Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Base class for our video dataset
"""
from torch.utils.data import Dataset
import numpy as np
import torchvision.transforms as transforms
from .transforms import TPSWarp, PairedColorJitter
class BaseVideoDataset(Dataset):
"""
Base dataset class for all video-type datasets in landmark learning
"""
def __init__(self, args, partition, inference_mode=False):
super(BaseVideoDataset, self).__init__()
self.dataset_path = args['dataset_path']
self.flip_probability = args['flip_probability']
self.img_size = args['img_size']
self.inference_mode = inference_mode
self.num_frames_array = self.setup_frame_array(args, partition)
assert(self.num_frames_array[0] == 0)
# video frame folders
self.resize = transforms.Resize([self.img_size, self.img_size])
self.to_tensor = transforms.ToTensor()
self.paired_color_jitter = PairedColorJitter(0.4, 0.4, 0.4, 0.3)
self.color_jitter = transforms.ColorJitter(0.4, 0.4, 0.4, 0.3)
self.TPSWarp = TPSWarp([self.img_size, self.img_size], 10, 10, 10,
rot_range=[args['rot_lb'], args['rot_ub']],
trans_range=[args['trans_lb'], args['trans_ub']],
scale_range=[args['scale_lb'], args['scale_ub']],
append_offset_channels=True)
self.normalize = transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))
    def setup_frame_array(self, args, partition):
"""
Implement this function to setup the cummulative array
cummulative array should have N+1 bins, where N is the number of videos
first bin should be 0
last bin should be the total number of frames in the dataset
Also use this function to setup any dataset-specific fields
"""
pass
def __len__(self):
"""
returns length of dataset (total number of frames)
"""
return self.num_frames_array[-1]
def get_frame_index(self, global_idx):
"""maps global frame index to video index and local video frame index
"""
vid_idx = np.searchsorted(self.num_frames_array, global_idx, side='right')-1
frame_idx = global_idx - self.num_frames_array[vid_idx]
return vid_idx, frame_idx
def process_batch(self, vid_idx, img_idx):
"""
implement this function
extracts the requisite frames from the dataset
returns a dictionary that must include entries in the required_keys variable in __getitem__
"""
pass
def sample_temporal(self, num_vid_frames, img_idx, range_min, range_max):
"""
samples another frame from the same video sequence
num_vid_frames: num frames in current video segment
range_min: minimum sampling offset (must be at least this far away)
range_max: maximum sampling offset
"""
if num_vid_frames - img_idx > range_min:
idx_offset = np.random.randint(range_min, min(range_max, num_vid_frames-img_idx))
else:
# sample in the opposite direction
idx_offset = -min(img_idx, np.random.randint(range_min, range_max))
return idx_offset
def construct_color_warp_pair(self, img):
"""
given an input image
constructs the color jitter - warping training pairs
returns color jittered, warped image, warping flow, and target tensors
"""
img_color_jittered = self.to_tensor(self.resize(self.color_jitter(img)))
img = self.to_tensor(self.resize(img))
img_warped = self.TPSWarp(img)
img_warped_offsets = img_warped[3:]
img_warped = self.normalize(img_warped[0:3])
img_color_jittered = self.normalize(img_color_jittered)
target = self.normalize(img)
return img_color_jittered, img_warped, img_warped_offsets, target
def __getitem__(self, idx):
# convert global index to video and local frame index
vid_idx, img_idx = self.get_frame_index(idx)
out_dict = self.process_batch(vid_idx, img_idx)
# assert all required keys are present
# construct the batch
if self.inference_mode:
required_keys = {'input_a', 'vid_idx', 'img_idx'}
else:
required_keys = {'input_a', 'input_b', 'target', 'input_temporal', 'imname'}
assert(len(required_keys - out_dict.keys()) == 0)
return out_dict
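# Editor's illustrative sketch of the subclassing contract (hypothetical dataset, not
# part of the original module): a concrete dataset only has to build the cumulative
# frame-count array and assemble per-frame batches; index mapping, flipping and the
# color-jitter/warp pair construction are inherited from BaseVideoDataset.
#
# class MyVideoDataset(BaseVideoDataset):
#     def setup_frame_array(self, args, partition):
#         self.videos = load_video_list(args['dataset_path'], partition)  # hypothetical helper
#         self.num_frames_array = np.array([0] + [v.num_frames for v in self.videos]).cumsum()
#         return self.num_frames_array
#
#     def process_batch(self, vid_idx, img_idx):
#         img = self.load_frame(vid_idx, img_idx)  # hypothetical helper returning a PIL image
#         jittered, warped, flow, target = self.construct_color_warp_pair(img)
#         return {'input_a': jittered, 'input_b': warped, 'input_temporal': warped,
#                 'target': target, 'imname': '%d_%d.jpg' % (vid_idx, img_idx),
#                 'warping_tps_params': flow}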
| UnsupervisedLandmarkLearning-master | dataloaders/base_datasets.py |
| UnsupervisedLandmarkLearning-master | dataloaders/__init__.py
import torch
import torch.distributed
import yaml
import os
from models.part_factorized_model import PartFactorizedModel
def denormalize_batch(batch, div_factor=1):
"""denormalize for visualization"""
# normalize using imagenet mean and std
mean = batch.data.new(batch.data.size())
std = batch.data.new(batch.data.size())
mean[0, :, :] = 0.5
mean[1, :, :] = 0.5
mean[2, :, :] = 0.5
std[0, :, :] = 0.5
std[1, :, :] = 0.5
std[2, :, :] = 0.5
batch = (batch * std + mean) * div_factor
return batch
def get_model(args):
if args['model'] == 'PartFactorizedModel':
return PartFactorizedModel(args)
else:
print("No such model")
exit(1)
def save_weights(model, save_path, used_DDP=False):
"""Model saving wrapper
If use_DDP, then save on model.module, else save model directly
Args:
model (torch.nn.module): Model to save.
save_path (str): Path to the checkpoint
used_DDP (bool): Whether the model was trained with DDP
"""
if used_DDP:
# unwrap the model
torch.save(model.module.state_dict(), save_path)
else:
torch.save(model.state_dict(), save_path)
def load_weights(ckpt):
"""
For safety, force-load the model onto the CPU
"""
state_dict = torch.load(ckpt, map_location=lambda storage, loc: storage)
return state_dict
def save_options(opt_name, config):
"""saves current model options
Args:
opt_name (str): path to options save file
config (dict): current config dictionary
"""
with open(opt_name, 'wt') as opt_file:
# opt_file.write('------------ Options -------------\n')
for k, v in sorted(config.items()):
if k in {'use_DDP', 'rank', 'world_size'}:
continue
opt_file.write('%s: %s\n' % (str(k), str(v)))
# opt_file.write('-------------- End ----------------\n')
def log_iter(epoch, iter, total, print_dict, header=None, log_handle=None):
"""
function for printing out losses, and optionally logs to a logfile if handle is passed in
"""
msg = "Epoch %d iter %d / %d" % (epoch, iter, total)
if header is not None:
msg = header + msg
for k, v in print_dict.items():
msg = msg + " | %s: %f" % (k, v)
if iter % 10 == 0:
log_handle.flush()
print(msg)
if log_handle is not None:
log_handle.write("%s\n" % msg)
def reduce_tensor(tensor, world_size):
    # reduce tensor for DDP
# source: https://raw.githubusercontent.com/NVIDIA/apex/master/examples/imagenet/main_amp.py
rt = tensor.clone()
torch.distributed.all_reduce(rt, op=torch.distributed.ReduceOp.SUM)
rt /= world_size
return rt
def parse_all_args(parser, defaults_file, return_args=False):
"""Processes the command line args from parser
Processes the command line args stored in the parser to override
defaults, then stores everything in a dictionary
Args:
parser (argparse.ArgumentParser): Argument parser for command line
defaults_file (str): Path to yaml file storing default values for all options
Returns:
config (dict): All options stored in a dictionary
"""
default_configs_h = open(defaults_file, 'r')
config = yaml.load(default_configs_h, Loader=yaml.FullLoader)
default_configs_h.close()
# add defaults.yaml options to the parser
for option, value in config.items():
if type(value) == bool:
parser.add_argument('--'+option, action='store_true')
else:
parser.add_argument('--'+option, type=type(value))
args = parser.parse_args()
# read in the specified config
user_config_h = open(args.config, 'r')
user_config = yaml.load(user_config_h, Loader=yaml.FullLoader)
user_config_h.close()
for option, value in user_config.items():
config[option] = value
# now override again if specified in commandline args
for option in config.keys():
func = getattr(args, option)
if func is not None and func is not False:
config[option] = func
# set the DDP params
config['rank'] = 0 # default value
if config['use_DDP']:
config['world_size'] = int(os.environ['WORLD_SIZE'])
config['rank'] = int(os.environ['RANK'])
config['local_rank'] = int(os.environ['LOCAL_RANK'])
if return_args:
return config, args
else:
return config
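# Editor's illustrative note (not part of the original module): option precedence is
# defaults.yaml < the YAML passed via --config < explicit command-line flags, e.g.
#     python train.py --config experiment.yaml --batch_size 16
# overrides batch_size from both YAML files (assuming batch_size is declared in
# defaults.yaml, which is what registers it as a command-line flag).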
def initialize_distributed(config):
"""
Sets up necessary stuff for distributed
training if the world_size is > 1
Args:
config (dict): configurations for this run
"""
if not config['use_DDP']:
return
# Manually set the device ids.
local_rank = config['local_rank']
world_size = config['world_size']
rank = config['rank']
torch.cuda.set_device(rank % torch.cuda.device_count())
print('Global Rank:')
print(rank)
# Call the init process
if world_size > 1:
init_method = 'tcp://'
master_ip = os.getenv('MASTER_ADDR', '127.0.0.1')
master_port = os.getenv('MASTER_PORT', '6666')
init_method += master_ip+':'+master_port
torch.distributed.init_process_group(
backend='nccl',
world_size=world_size, rank=rank,
init_method=init_method)
def get_learning_rate(optimizer):
"""
Extracts the optimizer's base learning rate
"""
for param_group in optimizer.param_groups:
return param_group['lr']
| UnsupervisedLandmarkLearning-master | utils/utils.py |
"""
Utility functions for visualization and image dumping
"""
from utils.utils import denormalize_batch
from PIL import Image
from PIL import ImageDraw
import numpy as np
def uint82bin(n, count=8):
"""adapted from https://github.com/ycszen/pytorch-segmentation/blob/master/transform.py
returns the binary of integer n, count refers to amount of bits"""
return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
def generate_palette():
"""adapted from https://github.com/ycszen/pytorch-segmentation/blob/master/transform.py
Used to generate the color palette we use to plot colorized heatmaps
"""
N = 41
palettes = np.zeros((N, 3), dtype=np.uint8)
for i in range(N):
r, g, b = 0, 0, 0
id = i+1
for j in range(7):
str_id = uint82bin(id)
r = r ^ (np.uint8(str_id[-1]) << (7-j))
g = g ^ (np.uint8(str_id[-2]) << (7-j))
b = b ^ (np.uint8(str_id[-3]) << (7-j))
id = id >> 3
palettes[i, 0] = r
palettes[i, 1] = g
palettes[i, 2] = b
    palettes = palettes.astype(float)
return palettes
def dump_image(normalized_image, landmark_coords, out_name=None):
"""Denormalizes the output image and optionally plots the landmark coordinates onto the image
Args:
normalized_image (torch.tensor): Image reconstruction output from the model (normalized)
landmark_coords (torch.tensor): x, y coordinates in normalized range -1 to 1
out_name (str, optional): file to write to
Returns:
np.array: uint8 image data stored in numpy format
"""
warped_image = np.clip(denormalize_batch(normalized_image).data.cpu().numpy(), 0, 1)
img = Image.fromarray((warped_image.transpose(1, 2, 0)*255).astype(np.uint8))
if landmark_coords is not None:
xs = landmark_coords[0].data.cpu().numpy()
ys = landmark_coords[1].data.cpu().numpy()
        w, h = img.size
        draw = ImageDraw.Draw(img)
        for i in range(len(xs)):
            x_coord = (xs[i] + 1) * w // 2
            y_coord = (ys[i] + 1) * h // 2
draw.text((x_coord, y_coord), str(i), fill=(0, 0, 0, 255))
if out_name is not None:
img.save(out_name)
return np.array(img)
def project_heatmaps_colorized(heat_maps):
color_palette = generate_palette()
c, h, w = heat_maps.shape
heat_maps = heat_maps / heat_maps.max()
heat_maps_colored = np.matmul(heat_maps.reshape(c, h*w).transpose(1, 0), color_palette[0:c, :])
heat_maps_out = np.clip(heat_maps_colored.transpose(1, 0).reshape(3, h, w), 0, 255)
return heat_maps_out
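# Editor's note (not part of the original module): project_heatmaps_colorized expects a
# (K, H, W) array of non-negative heatmaps with K <= 41 (the palette size) and returns a
# (3, H, W) array clipped to [0, 255], with each part channel tinted by a distinct
# palette color.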
| UnsupervisedLandmarkLearning-master | utils/visualizer.py |
""" Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
This file contains subroutines for our training pipeline
"""
import torch
import torch.nn as nn
def conv_ReLU(in_channels, out_channels, kernel_size, stride=1, padding=0,
use_norm=True, norm=nn.InstanceNorm2d):
"""Returns a 2D Conv followed by a ReLU
"""
if use_norm:
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding),
norm(out_channels),
nn.ReLU(inplace=True))
else:
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding),
nn.ReLU(inplace=True))
def estimate_gaussian_params(in_heatmaps, grid_x, grid_y, return_covar=False, skip_norm=False, activation=torch.exp, use_fixed_covar=False, fixed_covar=0.1):
"""Converts heatmaps to 2D Gaussians by estimating mean and covariance
Args:
in_heatmaps: b x K x H x W heatmaps
grid_x, grid_y: 1 x 1 x (HW) We compute mean and covariance over these
return_covar (bool): if true, also return the covariance matrix
activation: activation function for logits. Defaults to torch.exp, which gives us a softmax
use_fixed_covar (bool): if true, return hard coded scaled identity matrix, otherwise estimate it from the heatmap
"""
b, c, h, w = in_heatmaps.shape
heatmaps_reshaped = in_heatmaps.view(b, c, -1)
# should be b x c x HW
if skip_norm:
heatmaps_norm = heatmaps_reshaped
else:
heatmaps_norm = activation(heatmaps_reshaped)
heatmaps_norm = heatmaps_norm / heatmaps_norm.sum(2, True)
mu_x = torch.sum(heatmaps_norm * grid_x, 2)
mu_y = torch.sum(heatmaps_norm * grid_y, 2)
if return_covar:
if use_fixed_covar: # generate a fixed diagonal covariance matrix
covar = torch.eye(2, 2, device=torch.cuda.current_device()).view(1, 1, 2, 2) * fixed_covar
else: # actually estimate the covariance from the heatmaps
# should be 1 x 1 x 2 x HW
coord_grids_xy = torch.cat((grid_x, grid_y), dim=1).unsqueeze(0)
# covar will be b x 1 x 2 x 2
mu = torch.stack((mu_x, mu_y), 2).view(b, c, 2, 1)
mu_outer = torch.matmul(mu, torch.transpose(mu, 2, 3))
covar = torch.matmul(coord_grids_xy * heatmaps_norm.unsqueeze(2), coord_grids_xy.transpose(2, 3)) - mu_outer
return mu_x, mu_y, covar, heatmaps_norm.view(b, c, h, w)
return mu_x, mu_y, heatmaps_norm.view(b, c, h, w)
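# Editor's note (not part of the original module): treating the normalized heatmap
# weights p_i over grid coordinates g_i = (x_i, y_i) as a 2D distribution, the code
# above returns
#     mu    = sum_i p_i * g_i
#     Sigma = sum_i p_i * g_i g_i^T - mu mu^T
# i.e. the per-keypoint mean and covariance (or a fixed scaled identity when
# use_fixed_covar is set).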
def gaussian_params_to_heatmap(grid_x, grid_y, mu_x, mu_y, covar, out_h, out_w):
"""Converts Gaussian parameters to heatmaps
Args:
grid_x, grid_y: 1 x 1 x (HW)
mu_x, mu_y: B x K
covar: B x K x 2 x 2
"""
# B x K x HW
B, K = mu_x.shape
xx = grid_x - mu_x.unsqueeze(2)
yy = grid_y - mu_y.unsqueeze(2)
# B x K x HW x 2
xxyy_t = torch.stack((xx, yy), dim=3)
covar_inv = torch.inverse(covar)
new_dist = xxyy_t*torch.matmul(xxyy_t, covar_inv)
new_dist_norm = 1.0/(1+new_dist.sum(3))
new_dist_rshp = new_dist_norm.view(B, K, out_h, out_w)
return new_dist_rshp
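# Editor's note (not part of the original module): the reconstruction above renders each
# keypoint not as an exponential Gaussian but as the inverse-quadratic bump
#     h(g) = 1 / (1 + (g - mu)^T Sigma^{-1} (g - mu))
# which equals 1 at the mean and decays with squared Mahalanobis distance.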
class MyUpsample(nn.Module):
def __init__(self, scale_factor, mode='nearest'):
super().__init__()
self.upsample = nn.functional.interpolate
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = x.float()
if self.mode == 'bilinear':
x = self.upsample(x, scale_factor=self.scale_factor, mode=self.mode,
align_corners=True)
else:
x = self.upsample(x, scale_factor=self.scale_factor, mode=self.mode)
return x
def decoder_block(in_filters, out_filters, transpose=False, norm=nn.InstanceNorm2d):
if transpose:
return nn.Sequential(nn.ConvTranspose2d(in_filters, out_filters, kernel_size=3, stride=2, padding=1, output_padding=1),
norm(out_filters),
nn.ReLU(inplace=True))
else:
return nn.Sequential(conv_ReLU(in_filters, out_filters, 3, stride=1, padding=1, use_norm=True, norm=norm),
MyUpsample(scale_factor=2, mode='bilinear'),
conv_ReLU(out_filters, out_filters, 3, stride=1, padding=1, use_norm=True, norm=norm))
def encoder_block(in_filters, out_filters, norm=nn.InstanceNorm2d):
"""helper function to return two 3x3 convs with the 1st being stride 2
"""
return nn.Sequential(conv_ReLU(in_filters, out_filters, 3, stride=2, padding=1, use_norm=True, norm=norm),
conv_ReLU(out_filters, out_filters, 3, stride=1, padding=1, use_norm=True, norm=norm))
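# Minimal usage sketch (illustrative only): shows the heatmap -> Gaussian -> heatmap
# round trip implemented by estimate_gaussian_params and gaussian_params_to_heatmap above.
# The tensor sizes (batch 2, K=4 landmarks, 16x16 heatmaps) are arbitrary assumptions,
# and the example stays on CPU, so the fixed-covariance branch (which assumes a CUDA
# device) is not exercised here.
if __name__ == '__main__':
    import torch

    b, k, h, w = 2, 4, 16, 16
    heatmap_logits = torch.randn(b, k, h, w)

    # coordinate grids in [-1, 1], flattened to 1 x 1 x (H*W) as expected by the functions above
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, h), torch.linspace(-1, 1, w))
    grid_x = xs.contiguous().view(1, 1, -1)
    grid_y = ys.contiguous().view(1, 1, -1)

    mu_x, mu_y, covar, norm_maps = estimate_gaussian_params(
        heatmap_logits, grid_x, grid_y, return_covar=True)
    print(mu_x.shape, covar.shape, norm_maps.shape)  # (2, 4), (2, 4, 2, 2), (2, 4, 16, 16)

    # regenerate heatmaps from the estimated Gaussian parameters
    regenerated = gaussian_params_to_heatmap(grid_x, grid_y, mu_x, mu_y, covar, h, w)
    print(regenerated.shape)  # (2, 4, 16, 16)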
| UnsupervisedLandmarkLearning-master | models/submodules.py |
"""Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Parameterized Unet module that we use to construct our shape and appearance encoders
"""
import torch.nn as nn
from .submodules import conv_ReLU, encoder_block, decoder_block
class Unet(nn.Module):
def __init__(self, num_input_channels, decoder_out_channels, num_downsamples, num_upsamples, filters):
super(Unet, self).__init__()
# number of encoder downsampling stages and decoder upsampling stages
self.num_downsamples = num_downsamples
self.num_upsamples = num_upsamples
self.decoder_out_channels = decoder_out_channels
self.norm = nn.InstanceNorm2d
# decoder_out_channels must provide one entry per upsampling stage plus one for the final output layer
assert(len(decoder_out_channels) == num_upsamples + 1)
self.encoder_layers, per_layer_channels, f_block_filters = self.construct_encoder(num_input_channels, filters)
self.decoder_layers, self.skip_convs = self.construct_decoder(per_layer_channels, self.decoder_out_channels)
assert(len(self.skip_convs) == num_upsamples)
self.f_filters = f_block_filters
def construct_encoder(self, num_input_channels, filters):
"""Helper function to return the encoder layers
Args:
num_input_channels (int): number of input channels to the encoder.
filters (int): 1st encoder layer feature dimension (doubles every layer).
Returns:
torch.nn.ModuleList: module list of encoder layers
per_channel_filters (List(int)): List to keep track of feature dimensions per layer.
f_block_filters (int): feature dimension output of the first convolutional block
"""
if filters == 0:
filters = num_input_channels * 2
conv_1 = conv_ReLU(num_input_channels, filters, 3, stride=1, padding=1, norm=self.norm)
conv_2 = conv_ReLU(filters, filters*2, 3, stride=1, padding=1, norm=self.norm)
conv_2B = encoder_block(filters*2, filters*4, norm=self.norm)
layer_list = []
per_channel_filters = []
layer_list.append(nn.Sequential(conv_1, conv_2, conv_2B))
per_channel_filters.append(filters*4)
filters = filters * 4
f_block_filters = filters
for ds in range(self.num_downsamples-1):
layer_list.append(encoder_block(filters, filters*2, norm=self.norm))
filters = filters * 2
per_channel_filters.append(filters)
# return as a list such that we may need to index later for skip layers
return nn.ModuleList(layer_list), per_channel_filters, f_block_filters
def construct_decoder(self, enc_plc, decoder_out_channels):
"""
helper function to return upsampling convs for the decoder
enc_plc: encoder per-layer channels
output_channels: number of channels to output at final layer
"""
output_list = []
skip_convs = []
enc_plc_rev = enc_plc[::-1]
# first take in last output from encoder
in_channels = enc_plc_rev[0]
for us in range(self.num_upsamples+1):
if us == 0: # first one just conv
output_list.append(conv_ReLU(in_channels, in_channels, 1))
else:
out_channels = decoder_out_channels[us-1]
mapping_conv = conv_ReLU(enc_plc_rev[us-1], in_channels, 1, use_norm=False)
# map encoder outputs to match current inputs
if us == self.num_upsamples: # if last one
dec_layer = nn.Sequential(decoder_block(in_channels, out_channels),
nn.Conv2d(out_channels, decoder_out_channels[-1], 1))
else:
dec_layer = decoder_block(in_channels, out_channels)
output_list.append(dec_layer)
skip_convs.append(mapping_conv)
in_channels = out_channels
return nn.ModuleList(output_list), nn.ModuleList(skip_convs)
def forward(self, input, output_first_featmap=False):
encoder_outputs = []
output = input
# encode, and save the per-layer outputs
for layer in self.encoder_layers:
output = layer(output)
encoder_outputs.append(output)
for i in range(len(self.decoder_layers)):
if i == 0:
output = self.decoder_layers[i](encoder_outputs[-1])
else:
# apply skip conv on input
encoder_skip_feats = self.skip_convs[i-1](encoder_outputs[-i])
output = self.decoder_layers[i](output + encoder_skip_feats)
if output_first_featmap:
return output, encoder_outputs[0]
return output
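# Minimal usage sketch (illustrative only): a smoke test of the Unet wrapper above,
# using the same argument pattern as the shape encoder in part_factorized_model.py
# (4 downsampling stages, 3 upsampling stages, so the output is at half the input
# resolution). The batch size, 128x128 input and base filter count of 16 are
# arbitrary assumptions made for this example.
if __name__ == '__main__':
    import torch

    n_landmarks = 10
    net = Unet(3, [512, 256, 128, n_landmarks], num_downsamples=4, num_upsamples=3, filters=16)
    x = torch.randn(2, 3, 128, 128)
    out, first_feats = net(x, output_first_featmap=True)
    print(out.shape)          # expected: (2, 10, 64, 64) -- half-resolution landmark logits
    print(first_feats.shape)  # expected: (2, 64, 64, 64) -- output of the first encoder block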
| UnsupervisedLandmarkLearning-master | models/unet.py |
| UnsupervisedLandmarkLearning-master | models/__init__.py |
"""
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Network definition for our shape and appearance encoder model.
Heavily inspired by the network architecture described in https://arxiv.org/pdf/1903.06946.pdf
"""
import torch
import torch.nn as nn
import torch.nn.functional
from collections import namedtuple
from .submodules import estimate_gaussian_params, gaussian_params_to_heatmap
from .generator import SPADEGenerator
from .unet import Unet
class PartFactorizedModel(nn.Module):
def __init__(self, args):
super(PartFactorizedModel, self).__init__()
self.n_landmarks = args['n_landmarks']
appearance_feat_dim = args['n_filters']
# average-pool downsampler that halves the spatial resolution of its input
self.downsampler = torch.nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
self.use_identity_covariance = args['use_identity_covariance']
self.fixed_covar = args['fixed_covar']
self.img_size = args['img_size']
self.use_fg_bg_mask = args['use_fg_bg_mask']
self.shape_encoder = Unet(3, [512, 256, 128, self.n_landmarks], 4, 3, args['nsf'])
additional_filters = self.shape_encoder.f_filters
self.appearance_encoder = Unet(self.n_landmarks + additional_filters,
[64, appearance_feat_dim], 1, 1, args['naf'])
# setup generator opts for SPADE
SPADEOptions = namedtuple('Options', ['ngf', 'appearance_nc', 'semantic_nc', 'norm_G'])
opt = SPADEOptions(ngf=args['ngc'], appearance_nc=appearance_feat_dim,
semantic_nc=self.n_landmarks, norm_G='spectralspadeinstance3x3')
self.image_decoder = SPADEGenerator(opt, self.img_size, self.img_size)
if self.use_fg_bg_mask: # use foreground background masking
print("Using foreground-background masking!")
self.bg_net = Unet(3, [32, 64, 128, 3], 3, 3, 32)
mask_outdim = 1
if args['low_res_mask']: # use a lower resolution mask to avoid encoding hifreq detail
self.fg_mask_net = nn.Sequential(Unet(self.n_landmarks, [32, mask_outdim], 3, 1, 32),
nn.Upsample(scale_factor=4, mode='bilinear'))
else:
self.fg_mask_net = Unet(self.n_landmarks, [32, 32, 32, mask_outdim], 3, 3, 32)
self.sigmoid = nn.Sigmoid()
self.use_identity_covariance = args['use_identity_covariance']
# coordinate grids that we'll need later for estimating gaussians in coordinate space
with torch.no_grad():
coord_range = torch.arange(-1, 1, 4.0/self.img_size)
self.grid_y, self.grid_x = torch.meshgrid(coord_range.float(), coord_range.float())
self.grid_x = self.grid_x.cuda().contiguous().view(1, 1, -1)
self.grid_y = self.grid_y.cuda().contiguous().view(1, 1, -1)
coord_range_2x = torch.arange(-1, 1, 2.0/self.img_size)
self.grid_y_2x, self.grid_x_2x = torch.meshgrid(coord_range_2x.float(), coord_range_2x.float())
self.grid_x_2x = self.grid_x_2x.cuda().contiguous().view(1, 1, -1)
self.grid_y_2x = self.grid_y_2x.cuda().contiguous().view(1, 1, -1)
def encoding2part_maps(self, encoding, estimate_cov=True):
'''
takes in the encoded input B x num_landmarks x H x W , as output from an encoder,
and creates a B x num_landmarks x H x W normalized landmark heatmaps
'''
b, c, h, w = encoding.shape
if estimate_cov:
mu_x, mu_y, covar, htmp = estimate_gaussian_params(encoding, self.grid_x, self.grid_y,
return_covar=True, use_fixed_covar=self.use_identity_covariance,
fixed_covar=self.fixed_covar)
return htmp, (mu_x, mu_y, covar)
else:
mu_x, mu_y, htmp = estimate_gaussian_params(encoding, self.grid_x, self.grid_y, return_covar=False)
return htmp, (mu_x, mu_y)
def construct_downsampling_layers(self):
"""
parameter-free encoder.
Just a stack of downsampling layers
"""
return nn.ModuleList([torch.nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
torch.nn.AvgPool2d(kernel_size=3, stride=2, padding=1)])
def pool_appearance_maps(self, featmap, heatmaps):
"""Spatially pools appearance features from featmap based on heatmaps
Dim-C appearance features
K target parts/landmarks
Args:
featmap (torch.tensor): B x C x H x W feature maps
heatmaps (torch.tensor): B x K x H x W normalized heatmaps
Returns:
torch.tensor: B x K x C pooled appearance vectors
"""
featmap_expdim = featmap.unsqueeze(1) # B x 1 x C x H x W
heatmaps_expdim = heatmaps.unsqueeze(2) # B x K x 1 x H x W
fh_prod = featmap_expdim * heatmaps_expdim # B x K x C x H x W
return fh_prod.mean(dim=3).mean(dim=3)
def project_appearance_onto_part_map(self, appearance_vectors, heatmaps):
"""
Args:
appearance_vectors (torch.tensor): B x K x C appearance vectors (C-dim vector per part)
heatmaps (torch.tensor): B x K x H x W normalized heatmaps
Returns:
torch.tensor: B x C x H x W projected appearance map (reduced over K)
"""
# B x K x C x 1 x 1
appearance_vectors_expdim = appearance_vectors.unsqueeze(3).unsqueeze(3)
# B x K x 1 x H x W
heatmaps_expdim = heatmaps.unsqueeze(2)
# B x K x C x H x W
ah_prod = appearance_vectors_expdim * heatmaps_expdim
ah_prod_norm = ah_prod / (1+heatmaps_expdim.sum(1, True))
return ah_prod_norm.sum(dim=1) # reduce over k
def forward(self, color_jittered_input, warped_input=None, cj_gauss_means_x=None, cj_gauss_means_y=None, cj_gauss_covars=None):
use_input_gaussians = False
# terminology: cj (color-jittered, appearance varied), w (warped, pose varied)
if cj_gauss_means_x is not None:
# this block should only happen if we're generating images conditioned on externally provided Gaussians
# used as an inference mode
use_input_gaussians = True
assert(color_jittered_input is None)
assert(warped_input is not None)
assert(cj_gauss_means_y is not None)
cj_gauss_params = (cj_gauss_means_x, cj_gauss_means_y, cj_gauss_covars)
cj_part_maps = None
if not use_input_gaussians:
color_jittered_input_shape_enc = self.shape_encoder(color_jittered_input)
color_jittered_input_shape_enc = color_jittered_input_shape_enc[:, 0:self.n_landmarks, :, :]
cj_part_maps, cj_gauss_params = self.encoding2part_maps(color_jittered_input_shape_enc)
if warped_input is None:
return {'vis_centers': (cj_gauss_params[0], cj_gauss_params[1]),
'vis_cov': cj_gauss_params[2],
'input_a_heatmaps': cj_part_maps}
warped_input_shape_enc, shape_encoder_first_layer_feats = self.shape_encoder(warped_input, True)
warped_input_shape_enc = warped_input_shape_enc[:, 0:self.n_landmarks, :, :]
w_part_maps, w_gauss_params = self.encoding2part_maps(warped_input_shape_enc, True)
cj_gauss_maps = gaussian_params_to_heatmap(self.grid_x_2x, self.grid_y_2x, cj_gauss_params[0],
cj_gauss_params[1], cj_gauss_params[2], self.img_size, self.img_size)
w_gauss_maps = gaussian_params_to_heatmap(self.grid_x_2x, self.grid_y_2x, w_gauss_params[0],
w_gauss_params[1], w_gauss_params[2], self.img_size, self.img_size)
# extract appearance representation from the warped image
appearance_enc_input = torch.cat((w_part_maps, shape_encoder_first_layer_feats), dim=1)
appearance_enc = self.appearance_encoder(appearance_enc_input)
# spatial average pool over appearance info using original normalized part maps
# should be B x K x C
appearance_vectors = self.pool_appearance_maps(appearance_enc, w_part_maps)
# project appearance information onto the heatmap of the color-jittered image
# output should be B x C x H x W
projected_part_map = self.project_appearance_onto_part_map(appearance_vectors, cj_gauss_maps)
decoded_image = self.image_decoder(cj_gauss_maps, projected_part_map).clone()
reconstruction = decoded_image
return_dict = {'reconstruction': reconstruction,
'vis_centers': (cj_gauss_params[0], cj_gauss_params[1]),
'input_a_gauss_params': cj_gauss_params,
'input_a_heatmaps': cj_part_maps,
'input_a_gauss_maps': cj_gauss_maps,
'input_b_gauss_params': w_gauss_params,
'input_b_heatmaps': w_part_maps}
if self.use_fg_bg_mask: # if using foreground-background factorization
foreground_mask = self.sigmoid(self.fg_mask_net(cj_gauss_maps))
warped_fg_mask = self.sigmoid(self.fg_mask_net(w_gauss_maps))
background_recon = self.bg_net((1-warped_fg_mask) * warped_input)
return_dict['reconstruction'] = background_recon * (1-foreground_mask) + decoded_image * foreground_mask
return_dict['background_recon'] = background_recon
return_dict['decoded_foreground'] = decoded_image
return_dict['input_a_fg_mask'] = foreground_mask
return_dict['input_b_fg_mask'] = warped_fg_mask
return return_dict
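# Minimal usage sketch (illustrative only): shows how PartFactorizedModel is configured
# and called with a color-jittered view and a warped view of the same image. The values
# below (img_size=64, 10 landmarks, small filter counts, fg/bg masking disabled) are
# arbitrary assumptions for a quick smoke test, not the training configuration. The
# module moves its coordinate grids to the GPU in __init__, so the sketch only runs
# when CUDA is available.
if __name__ == '__main__':
    import torch

    if torch.cuda.is_available():
        args = {'n_landmarks': 10, 'n_filters': 64, 'use_identity_covariance': False,
                'fixed_covar': 0.1, 'img_size': 64, 'use_fg_bg_mask': False,
                'nsf': 16, 'naf': 16, 'ngc': 32, 'low_res_mask': False}
        model = PartFactorizedModel(args).cuda()
        cj = torch.randn(2, 3, 64, 64).cuda()      # appearance-varied (color-jittered) view
        warped = torch.randn(2, 3, 64, 64).cuda()  # pose-varied (warped) view
        out = model(cj, warped)
        print(out['reconstruction'].shape)     # expected: (2, 3, 64, 64)
        print(out['input_a_heatmaps'].shape)   # expected: (2, 10, 32, 32) -- half-res part maps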
| UnsupervisedLandmarkLearning-master | models/part_factorized_model.py |
"""
Copyright (C) 2019,2020 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
Modifications made to adapt the SPADE code to this work.
"""
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
import torch.nn.utils.spectral_norm as spectral_norm
from .normalization import SPADE
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
def print_network(self):
if isinstance(self, list):
self = self[0]
num_params = 0
for param in self.parameters():
num_params += param.numel()
print('Network [%s] was created. Total number of parameters: %.1f million. '
'To see the architecture, do print(network).'
% (type(self).__name__, num_params / 1000000))
def init_weights(self, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if classname.find('BatchNorm2d') != -1:
if hasattr(m, 'weight') and m.weight is not None:
init.normal_(m.weight.data, 1.0, gain)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'xavier_uniform':
init.xavier_uniform_(m.weight.data, gain=1.0)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
elif init_type == 'none': # uses pytorch's default init method
m.reset_parameters()
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
self.apply(init_func)
# propagate to children
for m in self.children():
if hasattr(m, 'init_weights'):
m.init_weights(init_type, gain)
# Using a stripped down version compared to the version found in
# https://github.com/NVlabs/SPADE/blob/master/models/networks/generator.py
class SPADEGenerator(BaseNetwork):
def __init__(self, opt, sw, sh):
super().__init__()
self.opt = opt
nf = opt.ngf
# Hard coded here
self.sw = sw // 16
self.sh = sh // 16
# Make the network deterministic by starting with
# downsampled feature map and segmentation map
# VAE option removed
self.fc = nn.Conv2d(self.opt.appearance_nc, 4 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
self.up_0 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_1 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2, mode='bilinear')
def forward(self, seg, appearance):
"""
Args:
seg (torch.tensor): segmentation map info. In the case of this work,
we use landmark heatmaps (projected gaussians)
appearance (torch.tensor): appearance information. This tensor holds the appearance
information for each part (ie the style information)
Returns:
x (torch.tensor): Rendered output image
"""
# First downsample the style information
x_0 = F.interpolate(appearance, size=(self.sh, self.sw))
x_1 = F.interpolate(appearance, size=(self.sh * 2, self.sw * 2))
# apply first layers on two scales, 1/16 and 1/8
# this is a modification made for this work, as the 1/16 scale tends to be too small
x_0 = self.fc(x_0)
x_0 = self.head_0(x_0, seg)
x_1 = self.fc(x_1)
x_1 = self.head_0(x_1, seg)
x = 0.5*self.up(x_0) + 0.5*x_1
x = self.G_middle_0(x, seg)
x = self.G_middle_1(x, seg)
x = self.up(x)
x = self.up_0(x, seg)
x = self.up(x)
x = self.up_1(x, seg)
x = self.up(x)
x = self.up_4(x, seg)
x = self.conv_img(F.leaky_relu(x, 2e-1))
# there used to be a tanh here but we're going to make do without it
return x
# ResNet block that uses SPADE.
# It differs from the ResNet block of pix2pixHD in that
# it takes in the segmentation map as input, learns the skip connection if necessary,
# and applies normalization first and then convolution.
# This architecture seemed like a standard architecture for unconditional or
# class-conditional GAN architecture using residual block.
# The code was inspired from https://github.com/LMescheder/GAN_stability.
class SPADEResnetBlock(nn.Module):
def __init__(self, fin, fout, opt):
super().__init__()
# Attributes
self.learned_shortcut = (fin != fout)
fmiddle = min(fin, fout)
# create conv layers
self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
# apply spectral norm if specified
if 'spectral' in opt.norm_G:
self.conv_0 = spectral_norm(self.conv_0)
self.conv_1 = spectral_norm(self.conv_1)
if self.learned_shortcut:
self.conv_s = spectral_norm(self.conv_s)
# define normalization layers
spade_config_str = opt.norm_G.replace('spectral', '')
self.norm_0 = SPADE(spade_config_str, fin, opt.semantic_nc)
self.norm_1 = SPADE(spade_config_str, fmiddle, opt.semantic_nc)
if self.learned_shortcut:
self.norm_s = SPADE(spade_config_str, fin, opt.semantic_nc)
# note the resnet block with SPADE also takes in |seg|,
# the semantic segmentation map as input
def forward(self, x, seg):
x_s = self.shortcut(x, seg)
dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
out = x_s + dx
return out
def shortcut(self, x, seg):
if self.learned_shortcut:
x_s = self.conv_s(self.norm_s(x, seg))
else:
x_s = x
return x_s
def actvn(self, x):
return F.leaky_relu(x, 2e-1)
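# Minimal usage sketch (illustrative only): shows how SPADEGenerator is driven by a
# namedtuple of options, mirroring the way part_factorized_model.py constructs it.
# The channel counts and the 64x64 output size are arbitrary assumptions; seg plays
# the role of the landmark heatmaps and appearance the role of the projected
# per-pixel appearance features.
if __name__ == '__main__':
    import torch
    from collections import namedtuple

    Options = namedtuple('Options', ['ngf', 'appearance_nc', 'semantic_nc', 'norm_G'])
    opt = Options(ngf=32, appearance_nc=64, semantic_nc=10, norm_G='spectralspadeinstance3x3')
    gen = SPADEGenerator(opt, 64, 64)
    seg = torch.rand(2, 10, 64, 64)           # landmark heatmaps (conditioning signal)
    appearance = torch.randn(2, 64, 64, 64)   # per-pixel appearance features
    img = gen(seg, appearance)
    print(img.shape)  # expected: (2, 3, 64, 64)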
| UnsupervisedLandmarkLearning-master | models/generator.py |
"""Implementation for various loss modules
GAN loss adapted from pix2pixHD (see comment below)
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.models as models
class PerceptualLoss(nn.Module):
def __init__(self):
super(PerceptualLoss, self).__init__()
self.MSELoss = torch.nn.MSELoss()
self.L1Loss = torch.nn.L1Loss()  # forward() below computes an L1 distance, so define it here
def normalize_batch(self, batch, div_factor=255.):
# normalize using imagenet mean and std
mean = batch.data.new(batch.data.size())
std = batch.data.new(batch.data.size())
mean[:, 0, :, :] = 0.485
mean[:, 1, :, :] = 0.456
mean[:, 2, :, :] = 0.406
std[:, 0, :, :] = 0.229
std[:, 1, :, :] = 0.224
std[:, 2, :, :] = 0.225
batch = torch.div(batch, div_factor)
batch -= mean
batch = torch.div(batch, std)
return batch
def forward(self, x, y):
x = self.normalize_batch(x)
y = self.normalize_batch(y)
return self.L1Loss(x, y)
# for reference
indx2name = {0: 'conv1_1', 1: 'relu1_1', 2: 'conv1_2', 3: 'relu1_2',
4: 'pool1', 5: 'conv2_1', 6: 'relu2_1', 7: 'conv2_2', 8: 'relu2_2', 9: 'pool2',
10: 'conv3_1', 11: 'relu3_1', 12: 'conv3_2', 13: 'relu3_2', 14: 'conv3_3',
15: 'relu3_3', 16: 'conv3_4', 17: 'relu3_4', 18: 'pool3',
19: 'conv4_1', 20: 'relu4_1', 21: 'conv4_2', 22: 'relu4_2', 23: 'conv4_3',
24: 'relu4_3', 25: 'conv4_4', 26: 'relu4_4', 27: 'pool4',
28: 'conv5_1', 29: 'relu5_1', 30: 'conv5_2', 31: 'relu5_2', 32: 'conv5_3',
33: 'relu5_3', 34: 'conv5_4', 35: 'relu5_4'}
# keep 3, 8, 13, 22
class Vgg19PerceptualLoss(PerceptualLoss):
def __init__(self, reduced_w, layer_name='relu5_2'):
super(Vgg19PerceptualLoss, self).__init__()
self.vgg19_layers = nn.Sequential(*list(models.vgg19(pretrained=True).features.children())[:23])
self.MSELoss = torch.nn.MSELoss()
# set hooks on layers indexed 3, 8, 13 and 22
# registers the hook to target layers
# allowing us to extract the outputs and store in
# self.extracted_feats
# be sure to clear self.extracted_feats
# before use
self.extracted_feats = []
def feature_extract_hook(module, inputs, outputs):
self.extracted_feats.append(outputs)
self.extract_layers = [3, 8, 13, 22] # assume last one will be input layer 0
if reduced_w:
self.loss_weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
else:
self.loss_weights = [1.0, 1.0, 1.0, 1.0, 1.0]
for layer_id in self.extract_layers:
self.vgg19_layers[layer_id].register_forward_hook(feature_extract_hook)
# disable grad for all VGG params
for param in self.parameters():
param.requires_grad = False
def forward(self, x, y, div_factor=1):
x[y == 0] = 0.0
self.extracted_feats = []
_ = self.vgg19_layers(x)
x_feats = self.extracted_feats
x_feats.append(x)
self.extracted_feats = []
_ = self.vgg19_layers(y)
y_feats = self.extracted_feats
y_feats.append(y)
layer_mse_losses = []
for i in range(len(x_feats)):
layer_mse_losses.append(self.MSELoss(x_feats[i], y_feats[i]))
full_loss = 0.0
for i in range(len(x_feats)):
full_loss += self.loss_weights[i] * layer_mse_losses[i]
return full_loss
"""
Adapted from
https://github.com/NVIDIA/pix2pixHD/blob/master/models/networks.py
"""
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
if isinstance(input[0], list):
loss = 0
for input_i in input:
pred = input_i[-1]
target_tensor = self.get_target_tensor(pred, target_is_real).cuda()
loss += self.loss(pred, target_tensor)
return loss
else:
target_tensor = self.get_target_tensor(input[-1], target_is_real).cuda()
return self.loss(input[-1], target_tensor)
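# Minimal usage sketch (illustrative only): runs the two loss modules above on dummy
# data. Vgg19PerceptualLoss downloads the torchvision VGG-19 weights on first use;
# GANLoss moves its target tensors to the GPU internally, so that part is guarded by
# a CUDA check. The tensor shapes and the single-scale discriminator output are
# arbitrary assumptions for this example.
if __name__ == '__main__':
    import torch

    perceptual = Vgg19PerceptualLoss(reduced_w=True).eval()
    pred_img = torch.rand(1, 3, 64, 64)
    target_img = torch.rand(1, 3, 64, 64)
    print('perceptual loss:', perceptual(pred_img, target_img).item())

    if torch.cuda.is_available():
        gan_loss = GANLoss(use_lsgan=True, tensor=torch.cuda.FloatTensor)
        disc_out = [torch.rand(1, 1, 8, 8).cuda()]  # one discriminator prediction map
        print('gan loss (real):', gan_loss(disc_out, True).item())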
| UnsupervisedLandmarkLearning-master | models/losses.py |
"""
Original source: https://github.com/NVlabs/SPADE/blob/master/models/networks/normalization.py
Modifications made to adapt to this work
Copyright (C) 2019,2020 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import re
import torch.nn as nn
import torch.nn.functional as F
# Creates SPADE normalization layer based on the given configuration
# SPADE consists of two steps. First, it normalizes the activations using
# your favorite normalization method, such as Batch Norm or Instance Norm.
# Second, it applies scale and bias to the normalized output, conditioned on
# the segmentation map.
# The format of |config_text| is spade(norm)(ks), where
# (norm) specifies the type of parameter-free normalization.
# (e.g. syncbatch, batch, instance)
# (ks) specifies the size of kernel in the SPADE module (e.g. 3x3)
# Example |config_text| will be spadesyncbatch3x3, or spadeinstance5x5.
# Also, the other arguments are
# |norm_nc|: the #channels of the normalized activations, hence the output dim of SPADE
# |label_nc|: the #channels of the input semantic map, hence the input dim of SPADE
# Note: syncbatch support removed for this project
class SPADE(nn.Module):
def __init__(self, config_text, norm_nc, label_nc):
super().__init__()
assert config_text.startswith('spade')
parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
# removed some normalization options here since we won't be using them
if param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
else:
raise ValueError('%s is not a recognized param-free norm type in SPADE'
% param_free_norm_type)
# The dimension of the intermediate embedding space. Yes, hardcoded.
nhidden = 128
pw = ks // 2
self.mlp_shared = nn.Sequential(
nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
nn.ReLU()
)
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
def forward(self, x, segmap):
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on semantic map
segmap = F.interpolate(segmap, size=x.size()[2:], mode='bilinear')
actv = self.mlp_shared(segmap)
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
# apply scale and bias
out = normalized * (1 + gamma) + beta
return out
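# Minimal usage sketch (illustrative only): shows how the |config_text| string is
# parsed and how SPADE modulates a feature map with a segmentation-like input (here,
# landmark heatmaps). The channel counts and spatial sizes are arbitrary assumptions.
if __name__ == '__main__':
    import torch

    spade = SPADE('spadeinstance3x3', norm_nc=64, label_nc=10)
    feats = torch.randn(2, 64, 16, 16)   # activations to be normalized and modulated
    segmap = torch.rand(2, 10, 32, 32)   # heatmaps; SPADE resizes them to 16x16 internally
    out = spade(feats, segmap)
    print(out.shape)  # expected: (2, 64, 16, 16)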
| UnsupervisedLandmarkLearning-master | models/normalization.py |