python_code | repo_name | file_path
---|---|---|
import torch
from nemo.utils import logging
from rdkit import Chem
from pysmilesutils.augment import SMILESAugmenter
from typing import List
import numpy as np
import math
from nemo_chem.tokenizer import MolEncTokenizer
import time
__all__ = ['PrepareDataset']
class PrepareDataset:
def __init__(self, tokenizer: MolEncTokenizer, seq_length: int,
pad_size_divisible_by_8: bool, **kwargs):
self.tokenizer = tokenizer
self.seq_length = seq_length
self.pad_size_divisible_by_8 = pad_size_divisible_by_8
def _check_seq_len(self, tokens):
""" Truncate token sequences that exceed the maximum sequence length, otherwise return original
Args:
tokens (List[List[str]]): List of token sequences
Returns:
tokens (List[List[str]]): List of token sequences (shortened, if necessary)
"""
seq_len = max([len(ts) for ts in tokens])
if seq_len > self.seq_length:
tokens_short = [ts[:self.seq_length] for ts in tokens]
return tokens_short
return tokens
def _canonicalize_smile(self, smile):
mol = Chem.MolFromSmiles(smile)
canon_smile = Chem.MolToSmiles(mol, canonical=True)
return canon_smile
def convert_tokens_to_smiles(self, tokens, canonical: bool = True):
"""Take in a token array and convert it back to (optionally canonicalized) SMILES strings"""
smiles = self.tokenizer.detokenize(tokens)
if canonical:
canon_smiles = [self._canonicalize_smile(smile) for smile in smiles]
return canon_smiles
return smiles
def _pad_seqs(self, seqs, pad_token):
pad_length = max([len(seq) for seq in seqs])
if self.pad_size_divisible_by_8:
pad_length = int(math.ceil(pad_length/8) * 8)
padded = [np.append(seq, np.array([pad_token] * (pad_length - len(seq)))) for seq in seqs]
masks = [([1] * len(seq)) + ([0] * (pad_length - len(seq))) for seq in seqs] # 1/True = Active, 0/False = Inactive
return padded, masks
def _prepare_tokens(self, token_ids, canonicalize: bool = False):
"""Prepare tokens for encoder or decoder from batch of input SMILES strings
Args:
batch (List[str]): Batch of input SMILES strings
tokenizer: Tokenizer instantiation.
mask (bool, optional): Mask decoder tokens. Defaults to False.
canonicalize (bool, optional): Canonicalize input SMILES. Defaults to False.
smiles_augmenter (optional): Function to augment SMILES. Defaults to None.
Returns:
dict: token output
"""
tokens = self.tokenizer.convert_ids_to_tokens(token_ids)
# detokenize ids back into target SMILES strings (canonicalization is skipped here)
canon_target = self.convert_tokens_to_smiles(tokens, canonical=False)
# pad and optionally mask the tokens
token_ids = self._check_seq_len(token_ids)
token_output = {
"token_ids": token_ids,
"target_smiles": canon_target
}
return token_output
def collate_fn(self, batch: List[np.array], label_pad: int = -1):
encoder_tokens = self._prepare_tokens(batch, canonicalize=False)
enc_token_ids, enc_pad_mask = self._pad_seqs(encoder_tokens['token_ids'], self.tokenizer.pad_id)
enc_token_ids = torch.tensor(enc_token_ids, dtype=torch.int64) #converting a list into torch tensor is very slow, convert to np.array first
enc_pad_mask = torch.tensor(enc_pad_mask, dtype=torch.int64)
decoder_tokens = self._prepare_tokens(batch, canonicalize=False)
label_ids = [np.append(sample, [self.tokenizer.eos_id]) for sample in decoder_tokens['token_ids']] # assign label_ids before adding bos_id to decoder
dec_token_ids = [np.insert(sample, 0, self.tokenizer.bos_id) for sample in decoder_tokens['token_ids']] # token_ids are numpy arrays, so use np.append/np.insert rather than list concatenation
dec_token_ids, dec_pad_mask = self._pad_seqs(dec_token_ids, self.tokenizer.pad_id)
dec_token_ids = torch.tensor(dec_token_ids, dtype=torch.int64)
dec_pad_mask = torch.tensor(dec_pad_mask, dtype=torch.int64)
label_token_ids, loss_mask = self._pad_seqs(label_ids, self.tokenizer.pad_id)
label_token_ids = torch.tensor(label_token_ids, dtype=torch.int64)
loss_mask = torch.tensor(loss_mask, dtype=torch.int64)
label_token_ids[~loss_mask.to(torch.bool)] = label_pad
collate_output = {
"text_enc": enc_token_ids,
"enc_mask": enc_pad_mask,
"text_dec": dec_token_ids,
"dec_mask": dec_pad_mask,
'labels': label_token_ids,
'loss_mask': loss_mask,
'target_smiles': encoder_tokens['target_smiles']} # smiles strings
return collate_output
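# A minimal usage sketch, assuming a tokenizer and a map-style dataset that yields numpy
# token-id arrays (e.g. the binarized molecule dataset); the batch size and sequence length
# below are illustrative assumptions, not values taken from the repo.
def _example_dataloader(dataset, tokenizer, batch_size: int = 32):
    from torch.utils.data import DataLoader
    collator = PrepareDataset(tokenizer=tokenizer, seq_length=512, pad_size_divisible_by_8=True)
    # Each batch is a dict of 'text_enc', 'enc_mask', 'text_dec', 'dec_mask', 'labels' and
    # 'loss_mask' tensors plus the raw 'target_smiles' strings.
    return DataLoader(dataset, batch_size=batch_size, collate_fn=collator.collate_fn)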
| MegaMolBART-dev | nemo_chem/data/prepare_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .augment import *
from .molecule_binary_dataset import *
from .csv_dataset import *
from .prepare_dataset import *
from .utils import *
from .preprocess.preprocess import *
from .preprocess.csv_to_binary import *
| MegaMolBART-dev | nemo_chem/data/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.utils import logging
from rdkit import Chem
import math
from pysmilesutils.augment import SMILESAugmenter
from typing import List
import numpy as np
import math
import random
from nemo.collections.common.tokenizers.char_tokenizer import TokenizerSpec
__all__ = ['MoleculeEnumeration']
# FIXME: apply masking on ids instead of tokens
class MoleculeEnumeration(object):
def __init__(self, tokenizer: TokenizerSpec, seq_length: int,
encoder_augment: bool, encoder_mask: bool,
decoder_augment: bool, decoder_mask: bool,
canonicalize_input: bool, pad_size_divisible_by_8: bool,
mask_scheme: str, mask_prob: float, span_lambda: float,
**kwargs):
self.tokenizer = tokenizer
self.seq_length = seq_length
self.encoder_augment = encoder_augment
self.encoder_mask = encoder_mask
self.decoder_augment = decoder_augment
self.decoder_mask = decoder_mask
self.canonicalize_input = canonicalize_input
self.pad_size_divisible_by_8 = pad_size_divisible_by_8 # workaround for CUDA alignment bug
self.mask_scheme = mask_scheme
self.mask_prob = mask_prob
self.span_lambda = span_lambda
# self.aug = CanonicalSMILESAugmenter().randomize_mol_restricted
def _smiles_augmeter_func(self, smiles: str, augment_data: bool, canonicalize_input: bool):
"""Regularize SMILES by coverting to RDKit mol objects and back
Args:
smiles (str): Input SMILES from dataset
canonicalize_input (bool, optional): Canonicalize by default. Defaults to False.
smiles_augmenter: Function to augment/randomize SMILES. Defaults to None
"""
mol = Chem.MolFromSmiles(smiles)
canon_smiles = Chem.MolToSmiles(mol, canonical=True) if canonicalize_input else smiles
if augment_data:
# aug_mol = self.aug(mol)
atom_order = list(range(mol.GetNumAtoms()))
np.random.shuffle(atom_order)
aug_mol = Chem.RenumberAtoms(mol, atom_order) # TODO how to use PySMILESutils for this
# There is a very rare possibility that RDKit will not be able to generate
# the SMILES for the augmented mol. In this case we just use the canonical
# mol to generate the SMILES
try:
aug_smiles = Chem.MolToSmiles(aug_mol, canonical=False)
except RuntimeError:
logging.info(f'Could not generate smiles for {smiles} after augmenting. Forcing canonicalization')
aug_smiles = canon_smiles if canonicalize_input else Chem.MolToSmiles(mol, canonical=True)
else:
aug_smiles = Chem.MolToSmiles(mol, canonical=False)
assert len(aug_smiles) > 0, 'Augmented SMILES string is empty'
assert len(canon_smiles) > 0, 'Canonical SMILES string is empty'
return aug_smiles, canon_smiles
def _check_seq_len(self, tokens: List[List[str]], mask: List[List[int]]):
""" Warn user and shorten sequence if the tokens are too long, otherwise return original
Args:
tokens (List[List[str]]): List of token sequences
mask (List[List[int]]): List of mask sequences
Returns:
tokens (List[List[str]]): List of token sequences (shortened, if necessary)
mask (List[List[int]]): List of mask sequences (shortened, if necessary)
"""
seq_len = max([len(ts) for ts in tokens])
if seq_len > self.seq_length:
tokens_short = [ts[:self.seq_length] for ts in tokens]
mask_short = [ms[:self.seq_length] for ms in mask]
return (tokens_short, mask_short)
return (tokens, mask)
def _prepare_tokens(self, batch: List[str], mask_data: bool = False):
"""Prepare tokens for encoder or decoder from batch of input SMILES strings
Args:
batch (List[str]): Batch of input SMILES strings
augment_data (bool): Augment SMILES
mask_data (bool, optional): Mask decoder tokens. Defaults to False.
Returns:
dict: token output
"""
# Tokenize with optional masking, padding is done later due to differences in encoder/decoder bos/eos tokens
token_output = self.tokenize(batch, mask=mask_data)
if mask_data:
tokens = token_output['masked_tokens']
mask = token_output['token_masks']
else:
tokens = token_output['original_tokens']
mask = [[True] * len(ts) for ts in tokens] # 1/True = Active, 0/False = Inactive
# Verify sequence length
tokens, mask = self._check_seq_len(tokens, mask)
token_output = {
"tokens": tokens,
"mask": mask
}
return token_output
def _pad_seqs(self, seqs, pad_token):
# TODO: switch to torch.nn.utils.rnn.pad_sequence
pad_length = max([len(seq) for seq in seqs])
if self.pad_size_divisible_by_8:
pad_length = int(math.ceil(pad_length/8) * 8)
padded = [seq + ([pad_token] * (pad_length - len(seq))) for seq in seqs]
masks = [([1] * len(seq)) + ([0] * (pad_length - len(seq))) for seq in seqs] # 1/True = Active, 0/False = Inactive
return padded, masks
def collate_fn(self, batch: List[str], label_pad: int = -1):
"""Collate function for NeMo MegaMolBART. Format of data has been altered for NeMo per 'NB' comments.
This code should be cleaned up and validated once new tokenizer from NeMo is incorporated."""
# Dimensions required by NeMo: [batch, sequence + padding]
# Encoder
encoder_smiles_list = [self._smiles_augmeter_func(smiles, augment_data=self.encoder_augment, canonicalize_input=self.canonicalize_input)
for smiles in batch]
encoder_smiles = [x[0] for x in encoder_smiles_list]
canon_targets = [x[1] for x in encoder_smiles_list]
encoder_dict = self._prepare_tokens(encoder_smiles, mask_data=self.encoder_mask)
encoder_tokens = encoder_dict['tokens'] # TODO boolean masks are never used from this function -- remove
enc_token_ids = [self.tokenizer.token_to_ids(t) for t in encoder_tokens]
enc_token_ids, encoder_mask = self._pad_seqs(enc_token_ids, self.tokenizer.pad_id)
enc_token_ids = torch.tensor(enc_token_ids, dtype=torch.int64)
encoder_mask = torch.tensor(encoder_mask, dtype=torch.int64)
# Decoder
if self.decoder_augment:
decoder_smiles_list = [self._smiles_augmeter_func(smiles, augment_data=self.decoder_augment, canonicalize_input=False)
for smiles in encoder_smiles]
decoder_smiles = [x[0] for x in decoder_smiles_list]
else:
decoder_smiles = encoder_smiles
decoder_dict = self._prepare_tokens(decoder_smiles, mask_data=self.decoder_mask)
decoder_tokens = decoder_dict['tokens']
dec_token_ids = [self.tokenizer.token_to_ids(t) for t in decoder_tokens]
label_ids = [sample + [self.tokenizer.eos_id] for sample in dec_token_ids] # assign label_ids before adding bos_id to decoder
dec_token_ids = [[self.tokenizer.bos_id] + sample for sample in dec_token_ids]
dec_token_ids, decoder_mask = self._pad_seqs(dec_token_ids, self.tokenizer.pad_id)
dec_token_ids = torch.tensor(dec_token_ids, dtype=torch.int64)
decoder_mask = torch.tensor(decoder_mask, dtype=torch.int64)
label_token_ids, loss_mask = self._pad_seqs(label_ids, self.tokenizer.pad_id)
label_token_ids = torch.tensor(label_token_ids, dtype=torch.int64)
loss_mask = torch.tensor(loss_mask, dtype=torch.int64)
label_token_ids[~loss_mask.to(torch.bool)] = label_pad
collate_output = {'text_enc': enc_token_ids,
'enc_mask': encoder_mask,
'text_dec': dec_token_ids,
'dec_mask': decoder_mask,
'labels': label_token_ids,
'loss_mask': loss_mask,
'target_smiles': canon_targets} # smiles strings
return collate_output
def tokenize(self, sents1, mask=False):
# TODO this function needs cleanup
tokens = [self.tokenizer.text_to_tokens(s) for s in sents1]
m_tokens, token_masks = self.mask_tokens(tokens, empty_mask=not mask)
output = {}
output["original_tokens"] = tokens
if mask:
output["masked_tokens"] = m_tokens
output["token_masks"] = token_masks
return output
def mask_tokens(self, tokens, empty_mask=False):
if empty_mask:
mask = [[True] * len(ts) for ts in tokens]
return tokens, mask
masked_tokens = []
token_masks = []
for ts in tokens:
# FIXME: add config
# if self.mask_scheme == "replace":
# masked, token_mask = self._mask_replace(ts)
# elif self.mask_scheme == "span":
masked, token_mask = self._mask_span(ts)
# else:
# raise ValueError(f"Unrecognised mask scheme: {self.mask_scheme}")
masked_tokens.append(masked)
token_masks.append(token_mask)
return masked_tokens, token_masks
def _mask_replace(self, ts):
mask_bools = [True, False]
weights = [self.mask_prob, 1 - self.mask_prob]
token_mask = random.choices(mask_bools, weights=weights, k=len(ts))
masked = [self._mask_token(ts[i]) if m else ts[i] for i, m in enumerate(token_mask)]
return masked, token_mask
def _mask_span(self, ts):
curr_token = 0
masked = []
token_mask = []
mask_bools = [True, False]
weights = [self.mask_prob, 1 - self.mask_prob]
sampled_mask = random.choices(mask_bools, weights=weights, k=len(ts))
while curr_token < len(ts):
# If mask, sample from a poisson dist to get length of mask
if sampled_mask[curr_token]:
mask_len = torch.poisson(torch.tensor(self.span_lambda)).long().item()
masked.append(self.tokenizer.mask_token)
token_mask.append(True)
curr_token += mask_len
# Otherwise don't mask
else:
masked.append(ts[curr_token])
token_mask.append(False)
curr_token += 1
return masked, token_mask
def _mask_token(self, token):
# FIXME: not working
rand = random.random()
if rand < self.show_mask_token_prob:
return self.tokenizer.mask_token
elif rand < self.show_mask_token_prob + ((1 - self.show_mask_token_prob) / 2):
token_idx = random.choice(self.chem_token_idxs)
return self.decode_vocab[token_idx]
else:
return token
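# A standalone sketch of the augmentation used in _smiles_augmeter_func above: randomize the
# atom numbering with RDKit and emit a non-canonical SMILES for the same molecule. Only the
# rdkit and numpy imports already present in this module are required.
def _example_randomize_smiles(smiles: str) -> str:
    """Return a randomized (non-canonical) SMILES string equivalent to the input molecule."""
    mol = Chem.MolFromSmiles(smiles)
    atom_order = list(range(mol.GetNumAtoms()))
    np.random.shuffle(atom_order)                      # shuffle the atom numbering in place
    aug_mol = Chem.RenumberAtoms(mol, atom_order)      # same molecule, new atom order
    return Chem.MolToSmiles(aug_mol, canonical=False)  # non-canonical SMILES string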
| MegaMolBART-dev | nemo_chem/data/augment.py |
# coding=utf-8
import os
import re
import math
import mmap
from typing import Optional
from dataclasses import dataclass
import torch
from nemo.core import Dataset, IterableDataset
from nemo.core.classes.dataset import DatasetConfig
from nemo.utils import logging
from nemo.collections.nlp.data.language_modeling.megatron.indexed_dataset import make_dataset
import time
__all__ = ['MoleculeBinaryDatasetConfig', 'MoleculeBinaryDataset']
@dataclass
class MoleculeBinaryDatasetConfig(DatasetConfig):
filepath: str = 'data.csv'
micro_batch_size: int = 1
use_iterable: bool = False
map_data: bool = False
encoder_augment: bool = True
encoder_mask: bool = False
decoder_augment: bool = False
canonicalize_input: bool = False
metadata_path: Optional[str] = None
num_samples: Optional[int] = None
drop_last: bool = False
shuffle: bool = False
num_workers: Optional[int] = None
pin_memory: bool = True # TODO: remove this if value is fixed
class MoleculeBinaryABCDataset(): # TODO should inherit from MegatronDataset
"""Molecule base dataset that reads tokenized data from binarized input files."""
def __init__(self, filepath: str, metadata_path: str = None, num_samples: int = None, map_data: bool = False):
"""
Args:
filepath (str): path to dataset file with unmasked tokenized smiles
"""
self.filepath = filepath
self.map_data = map_data # referenced in __exit__
self._cache = None
def __len__(self):
return self.len
def _initialize_file(self):
start_time = time.time()
self.indexed_dataset = make_dataset(self.filepath,"mmap", skip_warmup=False)
self.len = self.indexed_dataset.sizes.shape[0]
assert self.indexed_dataset.sizes.shape[0] == self.indexed_dataset.doc_idx[-1]
logging.info(' > finished creating indexed dataset in {:.4f} seconds'.format(time.time() - start_time))
logging.info(' > indexed dataset stats:')
logging.info(' number of documents: {}'.format(self.indexed_dataset.doc_idx.shape[0] - 1))
logging.info(' number of sentences: {}'.format(self.indexed_dataset.sizes.shape[0]))
def __exit__(self):
if self.map_data:
self.fh.close()
class MoleculeBinaryDataset(Dataset, MoleculeBinaryABCDataset):
"""Dataset that reads GPU-specific portion of data into memory from Binary file"""
def __init__(self, filepath: str, metadata_path: str = None, num_samples: int = None, map_data: bool = False, **kwargs):
super().__init__(filepath=filepath, metadata_path=metadata_path, num_samples=num_samples, map_data=map_data)
self._initialize_file()
def __getitem__(self, idx):
st = time.time()
if torch.is_tensor(idx):
idx = idx.item()
return self.indexed_dataset.get(idx)
| MegaMolBART-dev | nemo_chem/data/molecule_binary_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from enum import Enum
import re
import braceexpand
import os
from copy import deepcopy
from omegaconf import DictConfig, open_dict
import torch.utils.data as pt_data
from pytorch_lightning.trainer.trainer import Trainer
from nemo.utils import logging
from .csv_dataset import MoleculeCsvDataset
from .molecule_binary_dataset import MoleculeBinaryDataset
__all__ = ['DatasetTypes', 'expand_dataset_paths', 'build_train_valid_test_datasets']
class DatasetTypes(Enum):
zinc_csv = 0
def expand_dataset_paths(filepath: str, ext: str) -> List[str]:
"""Expand dataset paths from braces"""
filepath = filepath + ext if ext else filepath
# TODO this should eventually be moved to a Nemo fileutils module or similar
filepath = re.sub(r"""\(|\[|\<|_OP_""", '{', filepath) # replaces '(', '[', '<' and '_OP_' with '{'
filepath = re.sub(r"""\)|\]|\>|_CL_""", '}', filepath) # replaces ')', ']', '>' and '_CL_' with '}'
dataset_paths = list(braceexpand.braceexpand(filepath))
return dataset_paths
def check_paths_exist(dataset_paths, dataset_format):
"""Check that the expanded dataset paths are valid and they exist."""
errors = []
for filepath in dataset_paths:
if dataset_format == "csv":
if not os.path.exists(filepath):
errors.append(filepath)
if dataset_format == "bin":
binfile = filepath + ".bin"
if not os.path.exists(binfile):
errors.append(binfile)
return errors
def _build_train_valid_test_datasets(
cfg: DictConfig,
trainer: Trainer,
num_samples: int,
filepath: str,
metadata_path: str,
dataset_format: str
):
# TODO num_samples is currently not used
cfg = deepcopy(cfg)
with open_dict(cfg):
cfg['metadata_path'] = metadata_path
# Get datasets and load data
logging.info(f'Loading data from {filepath}')
dataset_paths = expand_dataset_paths(filepath, ".csv") if dataset_format == "csv" else expand_dataset_paths(filepath, None)
errors = check_paths_exist(dataset_paths, dataset_format)
assert len(errors) == 0, "Following files do not exist %s" % ' '.join(errors)
logging.info(f'Loading data from {dataset_paths}')
dataset_list = []
if dataset_format == "csv":
dataset = MoleculeCsvDataset(dataset_paths=dataset_paths, cfg=cfg)
elif dataset_format == "bin":
for path in dataset_paths:
data = MoleculeBinaryDataset(filepath=path, cfg=cfg, trainer=trainer, num_samples=num_samples)
dataset_list.append(data)
num_samples -= len(data)
if num_samples < 1:
break
if len(dataset_list) == 1:
dataset = dataset_list[0]
else:
dataset = pt_data.ConcatDataset(dataset_list)
else:
raise ValueError("Unrecognized data format. Expected csv or bin.")
return dataset
def build_train_valid_test_datasets(
cfg: DictConfig,
trainer: Trainer,
train_valid_test_num_samples: List[int]
):
# TODO metadata_file is currently not used
cfg = deepcopy(cfg)
with open_dict(cfg):
dataset_path = cfg.pop('dataset_path', '')
# dataset = cfg.pop('dataset')
metadata_file = cfg.pop('metadata_file', None)
dataset_format = cfg.pop('dataset_format')
ds_train = cfg.dataset.train
ds_val = cfg.dataset.val
ds_test = cfg.dataset.test
cfg.pop('dataset')
# Build individual datasets.
filepath = os.path.join(dataset_path, 'train', ds_train)
metadata_path = os.path.join(dataset_path, 'train', metadata_file) if metadata_file else None
train_dataset = _build_train_valid_test_datasets(cfg, trainer, train_valid_test_num_samples[0],
filepath, metadata_path, dataset_format)
filepath = os.path.join(dataset_path, 'val', ds_val)
metadata_path = os.path.join(dataset_path, 'val', metadata_file) if metadata_file else None
validation_dataset = _build_train_valid_test_datasets(cfg, trainer, train_valid_test_num_samples[1],
filepath, metadata_path, dataset_format)
filepath = os.path.join(dataset_path, 'test', ds_test)
metadata_path = os.path.join(dataset_path, 'test', metadata_file) if metadata_file else None
test_dataset = _build_train_valid_test_datasets(cfg, trainer, train_valid_test_num_samples[2],
filepath, metadata_path, dataset_format)
return (train_dataset, validation_dataset, test_dataset)
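# A small illustration of expand_dataset_paths above: '_OP_'/'_CL_' (or '(', '[', '<') are
# stand-ins for '{' and '}' so that brace ranges survive YAML/CLI quoting, then braceexpand
# expands the range. The pattern used here is made up for demonstration only.
def _example_expand_paths():
    # 'x_OP_000..002_CL_' + '.csv' becomes 'x{000..002}.csv', which braceexpand turns into
    # ['x000.csv', 'x001.csv', 'x002.csv']
    return expand_dataset_paths("x_OP_000..002_CL_", ".csv")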
| MegaMolBART-dev | nemo_chem/data/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import pickle
from typing import Optional
from dataclasses import dataclass
import torch
import numpy as np
from nemo.core import Dataset
from nemo.utils import logging
from nemo.collections.nlp.data.language_modeling.text_memmap_dataset import CSVMemMapDataset
try:
from apex.transformer.parallel_state import get_rank_info
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
__all__ = ['MoleculeCsvDatasetConfig', 'MoleculeCsvDataset', 'DatasetFileConfig']
@dataclass
class DatasetFileConfig():
train: str = None
test: str = None
val: str = None
@dataclass
class MoleculeCsvDatasetConfig():
dataset_path: str = ''
dataset: DatasetFileConfig = None
newline_int: int = 10
header_lines: int = 1
data_col: int = 1
data_sep: str = ','
sort_dataset_paths: bool = True
# FIXME: remove unneeded config variables
skip_lines: int = 0
micro_batch_size: int = 1
encoder_augment: bool = False
encoder_mask: bool = False
decoder_augment: bool = False
decoder_mask: bool = False
canonicalize_input: bool = True
dataloader_type: str = 'single'
drop_last: bool = False
pin_memory: bool = False # must be False with CSV dataset
num_workers: Optional[int] = None
class MoleculeCsvDataset(CSVMemMapDataset):
"""
Allow per-line lazy access to multiple text files using numpy memmap.
"""
def __init__(self,
dataset_paths,
cfg,
workers=None):
super().__init__(
dataset_paths=dataset_paths,
newline_int=cfg.get('newline_int'),
header_lines=cfg.get('header_lines'), # skip first N lines
workers=workers,
tokenizer=None,
sort_dataset_paths=cfg.get('sort_dataset_paths'),
data_col=cfg.get('data_col'),
data_sep=cfg.get('data_sep'),
)
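# A minimal construction sketch: the keys read via cfg.get(...) above correspond to the
# fields of MoleculeCsvDatasetConfig, so a plain dict (or an OmegaConf DictConfig) is enough
# to build the dataset. The CSV path below is an assumption for illustration only.
def _example_csv_dataset():
    example_cfg = {'newline_int': 10, 'header_lines': 1, 'data_col': 1,
                   'data_sep': ',', 'sort_dataset_paths': True}
    dataset = MoleculeCsvDataset(dataset_paths=['/data/zinc15/processed/train/x000.csv'],
                                 cfg=example_cfg)
    return dataset[0]  # lazily reads the SMILES column of the first data row via numpy memmap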
| MegaMolBART-dev | nemo_chem/data/csv_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Preprocess megamolbart data"
"Take text files as input and output tokenized data in binarized format"
##TODO Merge it with the CSV format data preprocessing script
import logging
import os
from glob import glob
from omegaconf import OmegaConf
import multiprocessing
import re
from typing import List
from rdkit import Chem
import numpy as np
import torch
from nemo.collections.nlp.data.language_modeling.megatron import indexed_dataset
from nemo.utils import logging
from nemo_chem.tokenizer import MolEncTokenizer, MolEncTokenizerFromVocabFileConfig
DATAFORMAT_EXT = [".csv", ".CSV"]
__all__ = ['CsvToBinary']
class CsvToBinary:
def __init__(self, input_dir: str, out_dir: str,
config, num_enumerations, num_workers):
"""
input_dir: This is the directory where the CSV data exists.
We support nested directory structure for input directories
out_dir: Directory path to save the bin files. out_dir will mimic
the same directory structure as input_dir and the output files
will be named identical to the input files, but with a .bin extension
num_enumerations: Number of enumerations to perform on every SMILES string
num_workers: Number of workers to use for multi-processing.
"""
self.input_dir = input_dir
self.out_dir = out_dir
self.cfg = config
self.num_enumerations = num_enumerations
self.num_workers = num_workers
self.tokenizer = self._initialize_tokenizer()
assert os.path.isdir(input_dir), "Expected --input to be a directory."
self.inputfiles = [ifile for path, subdir, files in os.walk(self.input_dir)
for dformat in DATAFORMAT_EXT
for ifile in glob(os.path.join(path, "*" + dformat))]
if len(self.inputfiles) == 0:
raise FileNotFoundError('No CSV files found in folder.')
else:
logging.info(f'Found {len(self.inputfiles)} .csv files.')
# If the destination path is not the same as where the CSVs exist, make an identical
# folder structure as the input directory at the destination
if self.out_dir != self.input_dir:
os.makedirs(self.out_dir, exist_ok=True)
os.access(self.out_dir, os.W_OK)
# Create an identical folder structure in the output directory as the input dir.
for path, subdir, files in os.walk(self.input_dir):
subdir = path[len(self.input_dir)+1:]
os.makedirs(os.path.join(self.out_dir, subdir), exist_ok=True)
# Refuse to overwrite existing binarized data at the output location
outbinfiles = [ifile for path, subdir, files in os.walk(self.out_dir) for ifile in glob(os.path.join(path, "*.bin"))]
assert len(outbinfiles) == 0, "Found existing .bin files at the output location %s. Cannot overwrite the existing data. Please delete and retry." % outbinfiles
outidxfiles = [ifile for path, subdir, files in os.walk(self.out_dir) for ifile in glob(os.path.join(path, "*.idx"))]
assert len(outidxfiles) == 0, "Found existing .idx files at the output location %s. Cannot overwrite the existing data. Please delete and retry." % outidxfiles
def _initialize_tokenizer(self):
default_tokenizer = OmegaConf.create(MolEncTokenizerFromVocabFileConfig())
cfg_tokenizer = self.cfg.tokenizer if self.cfg.get('tokenizer', False) else default_tokenizer
merge_cfg_tokenizer = OmegaConf.merge(default_tokenizer, cfg_tokenizer)
if not os.path.exists(merge_cfg_tokenizer.vocab_path):
raise ValueError(f'Vocab file not found at {merge_cfg_tokenizer.vocab_path}')
# Initialize tokenizer
tokenizer = MolEncTokenizer.from_vocab_file(**merge_cfg_tokenizer)
return tokenizer
def prepare_dataset(self, num_workers=25):
pool = multiprocessing.Pool(self.num_workers)
for inputfile in self.inputfiles:
# Ignore metadata.csv files
if "metadata.csv" in inputfile: continue
subfolder_path = os.path.dirname(inputfile[len(self.input_dir)+1:])
ifilebasename = os.path.splitext(os.path.basename(inputfile))[0]
output_file = os.path.join(self.out_dir, subfolder_path, ifilebasename + ".bin")
index_file = os.path.join(self.out_dir, subfolder_path, ifilebasename + ".idx")
dataset_builder = indexed_dataset.make_builder(output_file, impl="mmap", vocab_size=self.tokenizer.vocab_size)
ifile = open(inputfile, "r")
out_iterator = pool.imap(self._process_data, ifile, num_workers)
for enc_token_ids in out_iterator:
# We may return an empty list when the row doesn't match with our regex query
if not enc_token_ids: continue
# If num_enumerations > 0, we will have more than one element
# in the list and we can't convert the list of lists into torch
# tensor because they all may have different lengths.
# Padding should only be done during training, so we cannot pad them here.
for num, enc_token_id in enumerate(enc_token_ids):
dataset_builder.add_item(torch.tensor(enc_token_id))
logging.debug(f'Created {num} canonicalized smiles.')
dataset_builder.end_document()
dataset_builder.finalize(index_file)
def _process_data(self, line):
# First column is zincID and second column is the smiles string
all_smiles = []
# Ignore header
# TODO: This is specific to the current AZ format. Make this a config param in future.
if not (re.match('^[0-9]+', line)): return all_smiles
zinc_id, smiles = line.strip().split(",")
all_smiles.append(smiles)
mol = Chem.MolFromSmiles(smiles)
atom_order: List[int] = list(range(mol.GetNumAtoms()))
num_enumerations = self.num_enumerations # NOTE: Don't increment self.num_enumerations to preserve starting value
while(num_enumerations):
np.random.shuffle(atom_order)
aug_mol = Chem.RenumberAtoms(mol, atom_order)
try:
aug_smiles = Chem.MolToSmiles(aug_mol, canonical=False)
if aug_smiles not in all_smiles:
all_smiles.append(aug_smiles)
except:
# If RDKit couldn't generate augmented smile, we ignore and try again
pass
num_enumerations -= 1
token_output = self.tokenizer.tokenize(all_smiles, pad=False)
enc_token_ids = self.tokenizer.convert_tokens_to_ids(token_output["original_tokens"])
return enc_token_ids
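# A hypothetical driver for CsvToBinary, shown only as a sketch: the directory paths and the
# vocab location are assumptions, and the config is assumed to carry a 'tokenizer' entry
# compatible with MolEncTokenizerFromVocabFileConfig (i.e. a valid vocab_path).
def _example_csv_to_binary():
    cfg = OmegaConf.create({'tokenizer': {'vocab_path': '/workspace/vocab/megamolbart.txt'}})
    converter = CsvToBinary(input_dir='/data/zinc15/processed/train',
                            out_dir='/data/zinc15/binarized/train',
                            config=cfg,
                            num_enumerations=5,
                            num_workers=8)
    converter.prepare_dataset()  # writes one .bin/.idx pair per input CSV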
| MegaMolBART-dev | nemo_chem/data/preprocess/csv_to_binary.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import requests
import multiprocessing as mp
import pandas as pd
from datetime import datetime
from subprocess import run
from multiprocessing import Pool
from functools import partial
from rdkit import Chem
from nemo.utils import logging
MAX_LENGTH = 150
__all__ = ['Preprocess']
class Preprocess(object):
def __init__(self) -> None:
super().__init__()
self.retry = False
def _run_cmd(self, cmd, failure_error='Unexpected error while executing bash cmd'):
logging.debug(f'Running cmd: {cmd}')
process = run(['bash', '-c', cmd], capture_output=True, text=True)
if process.returncode != 0:
logging.error(failure_error)
sys.exit(process.returncode)
return process
def _process_file(self, url, download_dir='/tmp/zinc15/raw'):
filename = url.split('/')[-1]
if os.path.exists(os.path.join(download_dir, filename)):
logging.info(f'{url} already downloaded...')
return
logging.debug(f'Downloading file {filename}...')
try:
with requests.get(url, stream=True) as r:
r.raise_for_status()
tmp_filename = os.path.join(download_dir, filename + '_tmp')
header = True
with open(tmp_filename, 'w') as f:
for line in r.iter_lines():
if header:
header = False
f.write("zinc_id,SMILES\n")
continue
line = line.decode("utf-8")
splits = line.split("\t")
if len(splits) < 2:
continue
smi, zinc_id = splits[0], splits[1]
try:
mol = Chem.MolFromSmiles(smi)
smi = Chem.MolToSmiles(mol, canonical=True)
except RuntimeError:
continue
if len(smi) > MAX_LENGTH:
continue
f.write(f"{zinc_id},{smi}\n")
os.rename(tmp_filename, os.path.join(download_dir, filename))
return
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
logging.error(f'{url} Not found')
return
else:
logging.error(
f'Could not download file {url}: {e.response.status_code}')
raise e
def __processing_failure(self, e):
logging.info(f'Processing failure: {e}')
self.retry = True
def process_files(self, links_file, pool_size=8, download_dir='/tmp/zinc15/raw'):
"""
Download all the files in the links file.
Parameters:
links_file (str): File containing links to be downloaded.
pool_size (int): Number of processes to use.
download_dir (str): Directory to download the files to.
"""
logging.info(
f'Downloading files from {links_file} with poolsize {pool_size}...')
os.makedirs(download_dir, exist_ok=True)
with open(links_file, 'r') as f:
links = list(set([x.strip() for x in f]))
download_funct = partial(self._process_file, download_dir=download_dir)
while True:
pool = Pool(processes=pool_size)
pool.map_async(download_funct,
links,
error_callback=self.__processing_failure)
pool.close()
pool.join()
if self.retry:
logging.info(
'Retrying to download files that failed with 503...')
self.retry = False
else:
break
def _process_split(self, datafile, output_dir='/tmp/zinc15/processed/'):
filename = f'{output_dir}/split_data/{datafile}'
logging.info(f'Processing file {filename}...')
df = pd.read_csv(filename, header=None, names=['zinc_id', 'smiles'])
recs = int(df.shape[0] * 0.01)
test_df = df.sample(n=recs)
df = df.drop(test_df.index) # remove test data from training data
val_df = df.sample(n=recs)
df = df.drop(val_df.index) # remove validation data from training data
df.to_csv(f'{output_dir}/train/{datafile}.csv', index=False)
test_df.to_csv(f'{output_dir}/test/{datafile}.csv', index=False)
val_df.to_csv(f'{output_dir}/val/{datafile}.csv', index=False)
with open(f'{output_dir}/val/metadata.csv', 'a') as f:
f.write(f"{datafile},{val_df.shape[0]}\n")
with open(f'{output_dir}/test/metadata.csv', 'a') as f:
f.write(f"{datafile},{test_df.shape[0]}\n")
with open(f'{output_dir}/train/metadata.csv', 'a') as f:
f.write(f"{datafile},{df.shape[0]}\n")
del df
del test_df
del val_df
def balanced_split(self, download_dir, output_dir, pool_size=8):
if os.path.exists(output_dir):
logging.info(f'{output_dir} already exists...')
os.rename(output_dir, output_dir +
datetime.now().strftime('%Y%m%d%H%M%S'))
split_data = os.path.join(output_dir, 'split_data')
os.makedirs(split_data, exist_ok=True)
os.makedirs(os.path.join(output_dir, 'train'), exist_ok=True)
os.makedirs(os.path.join(output_dir, 'test'), exist_ok=True)
os.makedirs(os.path.join(output_dir, 'val'), exist_ok=True)
self._run_cmd(f"cd {split_data}; tail -q -n +2 {download_dir}/** | split -d -l 10000000 -a 3",
failure_error='Error while merging files')
split_files = os.listdir(split_data)
with open(f'{output_dir}/val/metadata.csv', 'w') as f:
f.write(f"file,size\n")
with open(f'{output_dir}/test/metadata.csv', 'w') as f:
f.write(f"file,size\n")
with open(f'{output_dir}/train/metadata.csv', 'w') as f:
f.write(f"file,size\n")
with Pool(processes=pool_size) as pool:
split_funct = partial(self._process_split, output_dir=output_dir)
pool.map(split_funct,
split_files)
def prepare_dataset(self,
links_file='conf/model/data/ZINC-downloader.txt',
download_dir='/tmp/zinc15/raw',
output_dir='/tmp/zinc15/processed'):
"""
Download the ZINC15 dataset and split it into train, validation, and test sets.
Parameters:
links_file (str): File containing links to be downloaded.
download_dir (str): Directory to download the files to.
output_dir (str): Directory to save the processed data to.
"""
# More than 8 cores may cause 503 errors. Please avoid larger pool size.
self.process_files(links_file,
pool_size=8,
download_dir=download_dir)
logging.info('Download complete.')
self.balanced_split(download_dir,
output_dir,
pool_size=8)
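# A minimal sketch of the splitting rule used in _process_split above: sample 1% of the rows
# for test, drop them, then sample the same number of rows again for validation; everything
# left over is training data.
def _example_split(df: pd.DataFrame):
    recs = int(df.shape[0] * 0.01)
    test_df = df.sample(n=recs)
    df = df.drop(test_df.index)
    val_df = df.sample(n=recs)
    train_df = df.drop(val_df.index)
    return train_df, val_df, test_df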
| MegaMolBART-dev | nemo_chem/data/preprocess/preprocess.py |
import logging
from contextlib import contextmanager
from rdkit import Chem
from hydra import compose, initialize
from nemo_chem.models.megamolbart import NeMoMegaMolBARTWrapper
log = logging.getLogger(__name__)
_INFERER = None
@contextmanager
def load_model(inf_cfg):
global _INFERER
if _INFERER is None:
_INFERER = NeMoMegaMolBARTWrapper(model_cfg=inf_cfg)
yield _INFERER
def test_smis_to_hiddens():
with initialize(config_path="../examples/chem/conf"):
cfg = compose(config_name="infer")
with load_model(cfg) as inferer:
smis = ['c1cc2ccccc2cc1',
'COc1cc2nc(N3CCN(C(=O)c4ccco4)CC3)nc(N)c2cc1OC',
'CC(=O)C(=O)N1CCC([C@H]2CCCCN2C(=O)c2ccc3c(n2)CCN(C(=O)OC(C)(C)C)C3)CC1']
hidden_state, pad_masks = inferer.smis_to_hidden(smis)
assert hidden_state is not None
assert hidden_state.shape[0] == len(smis)
assert hidden_state.shape[2] == inferer.cfg.max_position_embeddings
assert pad_masks is not None
def test_smis_to_embedding():
with initialize(config_path="../examples/chem/conf"):
cfg = compose(config_name="infer")
with load_model(cfg) as inferer:
smis = ['c1cc2ccccc2cc1',
'COc1cc2nc(N3CCN(C(=O)c4ccco4)CC3)nc(N)c2cc1OC',
'CC(=O)C(=O)N1CCC([C@H]2CCCCN2C(=O)c2ccc3c(n2)CCN(C(=O)OC(C)(C)C)C3)CC1']
embedding = inferer.smis_to_embedding(smis)
assert embedding is not None
assert embedding.shape[0] == len(smis)
assert embedding.shape[1] == inferer.cfg.max_position_embeddings
def test_hidden_to_smis():
with initialize(config_path="../examples/chem/conf"):
cfg = compose(config_name="infer")
with load_model(cfg) as inferer:
smis = ['c1cc2ccccc2cc1',
'COc1cc2nc(N3CCN(C(=O)c4ccco4)CC3)nc(N)c2cc1OC',
'CC(=O)C(=O)N1CCC([C@H]2CCCCN2C(=O)c2ccc3c(n2)CCN(C(=O)OC(C)(C)C)C3)CC1']
hidden_state, pad_masks = inferer.smis_to_hidden(smis)
inferred_smis = inferer.hidden_to_smis(hidden_state, pad_masks)
log.info(f'Input SMILES and Inferred: {smis}, {inferred_smis}')
assert(len(inferred_smis) == len(smis))
for smi, inferred_smi in zip(smis, inferred_smis):
log.info(f'Input and Inferred: {smi}, {inferred_smi}')
input_mol = Chem.MolFromSmiles(smi)
infer_mol = Chem.MolFromSmiles(inferred_smi)
assert input_mol is not None and infer_mol is not None
canonical_smi = Chem.MolToSmiles(input_mol, canonical=True)
canonical_inferred_smi = Chem.MolToSmiles(infer_mol, canonical=True)
log.info(f'Canonical Input and Inferred: {canonical_smi}, {canonical_inferred_smi}')
assert(canonical_smi == canonical_inferred_smi)
def test_sample():
with initialize(config_path="../examples/chem/conf"):
cfg = compose(config_name="infer")
with load_model(cfg) as inferer:
smis = ['c1cc2ccccc2cc1',
'COc1cc2nc(N3CCN(C(=O)c4ccco4)CC3)nc(N)c2cc1OC',
'CC(=O)C(=O)N1CCC([C@H]2CCCCN2C(=O)c2ccc3c(n2)CCN(C(=O)OC(C)(C)C)C3)CC1']
samples = inferer.sample(smis, num_samples=10, sampling_method='greedy-perturbate')
samples = set(samples)
log.info('\n'.join(smis))
log.info('\n'.join(samples))
valid_molecules = []
for smi in set(samples):
isvalid = False
mol = Chem.MolFromSmiles(smi)
if mol:
isvalid = True
valid_molecules.append(smi)
log.info(f'Sample: {smi}, {isvalid}')
log.info('Valid Molecules:\n' + "\n".join(valid_molecules))
log.info(f'Total samples = {len(samples)} unique samples {len(set(samples))} valids {len(valid_molecules)}')
if len(valid_molecules) < len(samples) * 0.3:
log.warning("TOO FEW VALID SAMPLES")
assert len(valid_molecules) != 0
| MegaMolBART-dev | tests/test_inference.py |
# # Copyright (c) 2022, NVIDIA CORPORATION.
# # SPDX-License-Identifier: Apache-2.0
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# import pytest
# import random
# import torch
# from nemo_chem.decoder import DecodeSampler
# from nemo_chem.tokenizer import MolEncTokenizer, MolEncTokenizerFromSmilesConfig
# from nemo_chem.models import MegaMolBARTModel, MegatronBARTConfig
# from nemo.collections.nlp.modules.common.megatron.megatron_init import initialize_model_parallel_for_nemo
# # TODO cleanup model tests
# # Use dummy SMILES strings
# react_data = [
# "CCO.C",
# "CCCl",
# "C(=O)CBr"
# ]
# prod_data = [
# "cc",
# "CCl",
# "CBr"
# ]
# random.seed(a=1)
# torch.manual_seed(1)
# initialize_model_parallel_for_nemo(
# world_size=1,
# global_rank=0,
# local_rank=0,
# tensor_model_parallel_size=1,
# seed=1234,
# )
# TEST_MODEL_CONFIG = MegatronBARTConfig()
# TEST_PERCEIVER_CONFIG = MegatronBARTConfig(encoder_type='perceiver')
# @pytest.mark.skip(reason="Model tests are currently deprecated")
# @pytest.fixture(params=[TEST_MODEL_CONFIG, TEST_PERCEIVER_CONFIG])
# def args(request):
# _args = request.param
# return _args
# @pytest.mark.skip(reason="Model tests are currently deprecated")
# @pytest.fixture
# def tokenizer():
# cfg = MolEncTokenizerFromSmilesConfig({'smiles': react_data + prod_data})
# _tokenizer = MolEncTokenizer.from_smiles(
# cfg.smiles["smiles"], cfg.regex, mask_scheme="replace")
# return _tokenizer
# @pytest.mark.skip(reason="Model tests are currently deprecated")
# @pytest.fixture
# def model(args, tokenizer, sampler):
# pad_token_idx = tokenizer.vocab[tokenizer.pad_token]
# vocab_size = len(tokenizer)
# _model = MegatronBART(sampler,
# args.encoder_type,
# pad_token_idx,
# vocab_size,
# args.blocks_model,
# args.steps_model,
# args.d_model,
# args.num_layers,
# args.num_heads,
# args.d_feedforward,
# args.seq_len,
# dropout=0.1)
# return _model.cuda()
# @pytest.mark.skip(reason="Model tests are currently deprecated")
# def test_pos_emb_shape(model, sampler, tokenizer, args):
# pos_embs = model._positional_embs()
# assert pos_embs.shape[0] == args.seq_len
# assert pos_embs.shape[1] == model.d_model # hidden size
# @pytest.mark.skip(reason="Model tests are currently deprecated")
# def test_construct_input_shape(model, sampler, tokenizer, args):
# token_output = tokenizer.tokenize(react_data, sents2=prod_data, pad=True)
# tokens = token_output["original_tokens"]
# sent_masks = token_output["sentence_masks"]
# token_ids = torch.tensor(
# tokenizer.convert_tokens_to_ids(tokens)).transpose(0, 1).cuda()
# sent_masks = torch.tensor(sent_masks).transpose(0, 1).cuda()
# emb = model._construct_input(token_ids, sent_masks)
# assert emb.shape[0] == max([len(ts) for ts in tokens])
# assert emb.shape[1] == 3
# assert emb.shape[2] == args.d_model
# @pytest.mark.skip(reason="Model tests are currently deprecated")
# def test_bart_forward_shape(model, sampler, tokenizer, args):
# react_token_output = tokenizer.tokenize(react_data, mask=True, pad=True)
# react_tokens = react_token_output["masked_tokens"]
# react_pad_mask = react_token_output["masked_pad_masks"]
# react_ids = torch.tensor(tokenizer.convert_tokens_to_ids(react_tokens)).T
# react_mask = torch.tensor(react_pad_mask).T
# prod_token_output = tokenizer.tokenize(prod_data, pad=True)
# prod_tokens = prod_token_output["original_tokens"]
# prod_pad_mask = prod_token_output["original_pad_masks"]
# prod_ids = torch.tensor(tokenizer.convert_tokens_to_ids(prod_tokens)).T
# prod_mask = torch.tensor(prod_pad_mask).T
# batch_input = {
# "encoder_input": react_ids.cuda(),
# "encoder_pad_mask": react_mask.cuda(),
# "decoder_input": prod_ids.cuda(),
# "decoder_pad_mask": prod_mask.cuda()
# }
# output = model(batch_input)
# model_output = output["model_output"]
# token_output = output["token_output"]
# exp_seq_len = 4 # From expected tokenized length of prod data
# exp_batch_size = len(prod_data)
# exp_dim = args.d_model # hidden_size
# exp_vocab_size = len(tokenizer)
# assert tuple(model_output.shape) == (exp_seq_len, exp_batch_size, exp_dim)
# assert tuple(token_output.shape) == (exp_seq_len, exp_batch_size, exp_vocab_size)
# @pytest.mark.skip(reason="Model tests are currently deprecated")
# def test_bart_encode_shape(model, sampler, tokenizer, args):
# react_token_output = tokenizer.tokenize(react_data, mask=True, pad=True)
# react_tokens = react_token_output["masked_tokens"]
# react_pad_mask = react_token_output["masked_pad_masks"]
# react_ids = torch.tensor(tokenizer.convert_tokens_to_ids(react_tokens)).T
# react_mask = torch.tensor(react_pad_mask).T
# batch_input = {
# "encoder_input": react_ids.cuda(),
# "encoder_pad_mask": react_mask.cuda()
# }
# output = model.encode(batch_input)
# if args.encoder_type == 'seq2seq':
# exp_seq_len = 9 # From expected tokenized length of react data
# elif args.encoder_type == 'perceiver':
# exp_seq_len = args.steps_model # From expected num_hidden_steps of the Perceiver encoder
# exp_batch_size = len(react_data)
# exp_dim = args.d_model # hidden_size
# assert tuple(output.shape) == (exp_seq_len, exp_batch_size, exp_dim)
# @pytest.mark.skip(reason="Model tests are currently deprecated")
# def test_bart_decode_shape(model, sampler, tokenizer, args):
# react_token_output = tokenizer.tokenize(react_data, mask=True, pad=True)
# react_tokens = react_token_output["masked_tokens"]
# react_pad_mask = react_token_output["masked_pad_masks"]
# react_ids = torch.tensor(tokenizer.convert_tokens_to_ids(react_tokens)).T
# react_mask = torch.tensor(react_pad_mask).T
# encode_input = {
# "encoder_input": react_ids.cuda(),
# "encoder_pad_mask": react_mask.cuda()
# }
# memory = model.encode(encode_input)
# prod_token_output = tokenizer.tokenize(prod_data, pad=True)
# prod_tokens = prod_token_output["original_tokens"]
# prod_pad_mask = prod_token_output["original_pad_masks"]
# prod_ids = torch.tensor(tokenizer.convert_tokens_to_ids(prod_tokens)).T
# prod_mask = torch.tensor(prod_pad_mask).T
# if args.encoder_type == "perceiver":
# react_mask = torch.zeros(
# (memory.shape[0:2]), dtype=react_mask.dtype, device=react_mask.device)
# batch_input = {
# "decoder_input": prod_ids.cuda(),
# "decoder_pad_mask": prod_mask.cuda(),
# "memory_input": memory.cuda(),
# "memory_pad_mask": react_mask.cuda()
# }
# output = model.decode(batch_input)
# exp_seq_len = 4 # From expected tokenized length of prod data
# exp_batch_size = len(react_data)
# exp_vocab_size = len(tokenizer)
# assert tuple(output.shape) == (exp_seq_len, exp_batch_size, exp_vocab_size)
# @pytest.mark.skip(reason="Model tests are currently deprecated")
# def test_calc_char_acc(model, sampler, tokenizer, args):
# react_token_output = tokenizer.tokenize(react_data[1:], pad=True)
# react_tokens = react_token_output["original_tokens"]
# react_pad_mask = react_token_output["original_pad_masks"]
# target_ids = torch.tensor(
# tokenizer.convert_tokens_to_ids(react_tokens)).T[1:, :]
# target_mask = torch.tensor(react_pad_mask).T[1:, :]
# # 9 is expected seq len of react data when padded
# token_output = torch.rand([8, len(react_data[1:]), len(tokenizer)])
# """
# Expected outputs
# CCCl
# C(=O)CBr
# Vocab:
# 0 <PAD>
# 3 &
# 6 C
# 7 O
# 8 .
# 9 Cl
# 10 (
# 11 =
# 12 )
# 13 Br
# """
# # Batch element 0
# token_output[0, 0, 6] += 1
# token_output[1, 0, 6] -= 1
# token_output[2, 0, 9] += 1
# token_output[3, 0, 3] += 1
# token_output[4, 0, 0] += 1
# token_output[5, 0, 0] -= 1
# # Batch element 1
# token_output[0, 1, 6] += 1
# token_output[1, 1, 10] += 1
# token_output[2, 1, 11] += 1
# token_output[3, 1, 7] += 1
# token_output[4, 1, 12] -= 1
# token_output[5, 1, 6] += 1
# token_output[6, 1, 13] -= 1
# token_output[7, 1, 3] += 1
# batch_input = {
# "target": target_ids.cuda(),
# "target_pad_mask": target_mask.cuda()
# }
# model_output = {
# "token_output": token_output.cuda()
# }
# token_acc = model._calc_char_acc(batch_input, model_output)
# exp_token_acc = (3 + 6) / (4 + 8)
# assert exp_token_acc == token_acc
| MegaMolBART-dev | tests/pre_train_model_test.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import random
import torch
from nemo_chem.tokenizer import MolEncTokenizer, MolEncTokenizerFromSmilesConfig
# Use dummy SMILES strings
smiles_data = [
"CCO.Ccc",
"CCClCCl",
"C(=O)CBr"
]
cfg = MolEncTokenizerFromSmilesConfig({'smiles': smiles_data})
example_tokens = [
["^", "C", "(", "=", "O", ")", "unknown", "&"],
["^", "C", "C", "<SEP>", "C", "Br", "&"]
]
# Setting seed here only applies when all tests run in same order, so these are now set per test
SEED = 0
def test_create_vocab():
tokenizer = MolEncTokenizer.from_smiles(smiles_data, cfg.regex)
expected = {
"<PAD>": 0,
"?": 1,
"^": 2,
"&": 3,
"<MASK>": 4,
"<SEP>": 5,
"C": 6,
"O": 7,
".": 8,
"c": 9,
"Cl": 10,
"(": 11,
"=": 12,
")": 13,
"Br": 14
}
vocab = dict(sorted(tokenizer.vocab.items(), key=lambda x: x[1]))
assert expected == vocab
assert len(vocab) == tokenizer.vocab_size == len(tokenizer)
def test_pad_seqs_padding():
seqs = [[1,2], [2,3,4,5], []]
padded, _ = MolEncTokenizer._pad_seqs(seqs, " ")
expected = [[1,2, " ", " "], [2,3,4,5], [" ", " ", " ", " "]]
assert padded == expected
def test_pad_seqs_mask():
seqs = [[1,2], [2,3,4,5], []]
_, mask = MolEncTokenizer._pad_seqs(seqs, " ")
expected_mask = [[1, 1, 0, 0], [1, 1, 1, 1], [0, 0, 0, 0]] # masking is inverted with NeMo
assert expected_mask == mask
def test_mask_tokens_empty_mask():
tokenizer = MolEncTokenizer.from_smiles(cfg.smiles, cfg.regex)
masked, token_mask = tokenizer.mask_tokens(example_tokens, empty_mask=True)
expected_sum = 15 # masking is inverted with NeMo
mask_sum = sum([sum(m) for m in token_mask])
assert masked == example_tokens
assert expected_sum == mask_sum
# Run tests which require random masking first so we get deterministic masking
# NB: ordered running no longer required, this is kept for compatibility with previous versions
@pytest.mark.order(1)
def test_mask_tokens_replace():
random.seed(a=1)
torch.manual_seed(SEED)
tokenizer = MolEncTokenizer.from_smiles(cfg.smiles, cfg.regex, mask_prob=0.4, mask_scheme='replace')
masked, token_mask = tokenizer.mask_tokens(example_tokens)
expected_masks = [
[True, False, False, True, False, False, False, False],
[False, False, False, True, False, False, True]
]
assert expected_masks == token_mask
@pytest.mark.order(3)
def test_mask_tokens_span():
random.seed(a=1)
torch.manual_seed(SEED)
tokenizer = MolEncTokenizer.from_smiles(cfg.smiles, cfg.regex, mask_prob=0.4)
masked, token_mask = tokenizer.mask_tokens(example_tokens)
expected_masks = [
[True, False, False], [True, False, False, True]
]
assert token_mask == expected_masks
def test_convert_tokens_to_ids():
tokenizer = MolEncTokenizer.from_smiles(smiles_data[2:3], cfg.regex)
expected_ids = [[2, 6, 7, 8, 9, 10, 1, 3], [2, 6, 6, 5, 6, 11, 3]]
ids = tokenizer.convert_tokens_to_ids(example_tokens)
assert expected_ids == ids
ids = tokenizer.tokens_to_ids(example_tokens)
assert expected_ids == ids
def test_convert_ids_to_tokens():
tokenizer = MolEncTokenizer.from_smiles(smiles_data[2:3], cfg.regex)
ids = [[2, 6, 7, 8, 9, 10, 1, 3], [2, 6, 6, 5, 6, 11, 3]]
expected_tokens = [['^', 'C', '(', '=', 'O', ')', '?', '&'], ['^', 'C', 'C', '<SEP>', 'C', 'Br', '&']]
tokens = tokenizer.convert_ids_to_tokens(ids)
assert expected_tokens == tokens
tokens = tokenizer.ids_to_tokens(ids)
assert expected_tokens == tokens
def test_tokenize_one_sentence():
tokenizer = MolEncTokenizer.from_smiles(cfg.smiles, cfg.regex)
tokens = tokenizer.tokenize(smiles_data)
# BOS/EOS no longer added in tokenizer
expected = [
["C", "C", "O", ".", "C", "c", "c"],
["C", "C", "Cl", "C", "Cl"],
["C", "(", "=", "O", ")", "C", "Br"]
]
assert expected == tokens["original_tokens"]
@pytest.mark.skip(reason="Sentences are not currently supported")
def test_tokenize_two_sentences():
tokenizer = MolEncTokenizer.from_smiles(cfg.smiles, cfg.regex)
tokens = tokenizer.tokenize(smiles_data, sents2=smiles_data)
expected = [
["^", "C", "C", "O", ".", "C", "c", "c", "<SEP>", "C", "C", "O", ".", "C", "c", "c", "&"],
["^", "C", "C", "Cl", "C", "Cl", "<SEP>", "C", "C", "Cl", "C", "Cl", "&"],
["^", "C", "(", "=", "O", ")", "C", "Br", "<SEP>", "C", "(", "=", "O", ")", "C", "Br", "&"]
]
expected_sent_masks = [
([0] * 9) + ([1] * 8),
([0] * 7) + ([1] * 6),
([0] * 9) + ([1] * 8),
]
assert expected == tokens["original_tokens"]
assert expected_sent_masks == tokens["sentence_masks"]
@pytest.mark.skip(reason="Sentences are not currently supported")
@pytest.mark.order(2)
def test_tokenize_mask_replace():
random.seed(a=1)
torch.manual_seed(SEED)
tokenizer = MolEncTokenizer.from_smiles(cfg.smiles, cfg.regex, mask_prob=0.4, mask_scheme="replace")
tokens = tokenizer.tokenize(smiles_data, sents2=smiles_data, mask=True)
expected_m_tokens = [
['^', '<MASK>', 'C', 'O', '<MASK>', 'C', 'c', 'c', '<SEP>', '<MASK>', '<MASK>', 'O', '.', '<MASK>', '<MASK>', '<MASK>', '&'],
['^', '<MASK>', 'C', 'Cl', 'C', '<MASK>', '<SEP>', 'C', '<MASK>', 'Cl', 'C', '<MASK>', '&'],
['^', '<MASK>', '(', '=', '<MASK>', '<MASK>', 'C', 'Br', '<SEP>', 'C', '(', '=', 'O', ')', '<MASK>', 'Br', '&']
]
expected_tokens = [
['^', 'C', 'C', 'O', '.', 'C', 'c', 'c', '<SEP>', 'C', 'C', 'O', '.', 'C', 'c', 'c', '&'],
['^', 'C', 'C', 'Cl', 'C', 'Cl', '<SEP>', 'C', 'C', 'Cl', 'C', 'Cl', '&'],
['^', 'C', '(', '=', 'O', ')', 'C', 'Br', '<SEP>', 'C', '(', '=', 'O', ')', 'C', 'Br', '&']
]
assert expected_m_tokens == tokens["masked_tokens"]
assert expected_tokens == tokens["original_tokens"]
@pytest.mark.skip(reason="Sentences are not currently supported")
@pytest.mark.order(4)
def test_tokenize_mask_span():
random.seed(a=1)
torch.manual_seed(SEED)
tokenizer = MolEncTokenizer.from_smiles(cfg.smiles, cfg.regex, mask_prob=0.4)
tokens = tokenizer.tokenize(smiles_data, sents2=smiles_data, mask=True)
expected_m_tokens = [
['^', '<MASK>', 'c', '<SEP>', '<MASK>', '<MASK>', '&'],
['^', 'C', '<MASK>', 'Cl', '<SEP>', '<MASK>', '&'],
['^', 'C', '<MASK>', 'O', '<MASK>', '<SEP>', '<MASK>', '<MASK>', 'Br', '&']
]
expected_tokens = [
['^', 'C', 'C', 'O', '.', 'C', 'c', 'c', '<SEP>', 'C', 'C', 'O', '.', 'C', 'c', 'c', '&'],
['^', 'C', 'C', 'Cl', 'C', 'Cl', '<SEP>', 'C', 'C', 'Cl', 'C', 'Cl', '&'],
['^', 'C', '(', '=', 'O', ')', 'C', 'Br', '<SEP>', 'C', '(', '=', 'O', ')', 'C', 'Br', '&']
]
assert expected_m_tokens == tokens["masked_tokens"]
assert expected_tokens == tokens["original_tokens"]
assert len(tokens["masked_tokens"]) == len(tokens["token_masks"])
for ts, tms in zip(tokens["masked_tokens"], tokens["token_masks"]):
assert len(ts) == len(tms)
@pytest.mark.order(5)
def test_tokenize_mask_span_pad():
random.seed(a=1)
torch.manual_seed(SEED)
tokenizer = MolEncTokenizer.from_smiles(cfg.smiles, cfg.regex, mask_prob=0.4)
tokens = tokenizer.tokenize(smiles_data, mask=True, pad=True)
# BOS / EOS no longer added in tokenizer
expected_m_tokens = [
['<MASK>', 'c', '<PAD>', '<PAD>'],
['C', '<MASK>', 'Cl', '<PAD>'],
['C', '<MASK>', 'O', '<MASK>']
]
expected_tokens = [
['C', 'C', 'O', '.', 'C', 'c', 'c'],
['C', 'C', 'Cl', 'C', 'Cl', '<PAD>', '<PAD>'],
['C', '(', '=', 'O', ')', 'C', 'Br']
]
assert expected_m_tokens == tokens["masked_tokens"]
assert expected_tokens == tokens["original_tokens"]
assert len(tokens["masked_tokens"]) == len(tokens["token_masks"])
assert len(tokens["masked_tokens"]) == len(tokens["masked_pad_masks"])
for ts, tms in zip(tokens["masked_tokens"], tokens["token_masks"]):
assert len(ts) == len(tms)
for ts, pms in zip(tokens["masked_tokens"], tokens["masked_pad_masks"]):
assert len(ts) == len(pms)
@pytest.mark.skip(reason="Sentences are not currently supported")
def test_tokenize_padding():
tokenizer = MolEncTokenizer.from_smiles(cfg.smiles, cfg.regex)
output = tokenizer.tokenize(smiles_data, sents2=smiles_data, pad=True)
expected_tokens = [
["^", "C", "C", "O", ".", "C", "c", "c", "<SEP>", "C", "C", "O", ".", "C", "c", "c", "&"],
["^", "C", "C", "Cl", "C", "Cl", "<SEP>", "C", "C", "Cl", "C", "Cl", "&", "<PAD>", "<PAD>", "<PAD>", "<PAD>"],
["^", "C", "(", "=", "O", ")", "C", "Br", "<SEP>", "C", "(", "=", "O", ")", "C", "Br", "&"]
]
expected_pad_masks = [
[0] * 17,
([0] * 13) + ([1] * 4),
[0] * 17
]
expected_sent_masks = [
([0] * 9) + ([1] * 8),
([0] * 7) + ([1] * 6) + ([0] * 4),
([0] * 9) + ([1] * 8),
]
assert expected_tokens == output["original_tokens"]
assert expected_pad_masks == output["original_pad_masks"]
assert expected_sent_masks == output["sentence_masks"]
| MegaMolBART-dev | tests/test_tokenizer.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | MegaMolBART-dev | tests/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import torch
from pytorch_lightning.trainer.trainer import Trainer
from torch.utils.data import DataLoader
from nemo_chem.models.megamolbart import MegaMolBARTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPPlugin, NLPSaveRestoreConnector
from nemo.utils.app_state import AppState
assert torch.cuda.is_available()
from torch.utils.data.dataset import Dataset
from typing import Dict
class MoleculeRequestDataset(Dataset):
def __init__(self, request: Dict, tokenizer) -> None:
super().__init__()
self.request = request
self.tokenizer = tokenizer
# tokenize prompt
self.request['tokenized_prompt'] = ' '.join(self.tokenizer.text_to_tokens(request['prompt']))
tokens = self.tokenizer.text_to_ids(request['prompt'])
self.request['tokens'] = torch.tensor(tokens)
self.mask_prompt(self.request['prompt'])
def mask_prompt(self, sample):
sample = torch.LongTensor(self.tokenizer.text_to_ids(sample))
self.request['masked_sample'] = sample
def __len__(self):
return 1
def __getitem__(self, index):
return self.request
def main():
parser = ArgumentParser()
parser.add_argument("--model_file", type=str, required=True, help="Pass path to model's .nemo file")
parser.add_argument(
"--prompt", type=str, default="N[C@H]1CCC(=O)[C@H](O)[C@H](O)[C@H]1O", required=False, help="Prompt for the model (a text to complete)"
)
parser.add_argument(
"--tokens_to_generate", type=int, default="100", required=False, help="How many tokens to add to prompt"
)
parser.add_argument(
"--tensor_model_parallel_size", type=int, default=1, required=False,
)
parser.add_argument(
"--pipeline_model_parallel_size", type=int, default=1, required=False,
)
parser.add_argument(
"--pipeline_model_parallel_split_rank", type=int, default=0, required=False,
)
parser.add_argument("--precision", default="16", type=str, help="PyTorch Lightning Trainer precision flag")
args = parser.parse_args()
# cast precision to int if 32 or 16
if args.precision in ["32", "16"]:
args.precision = int(float(args.precision))
# trainer required for restoring model parallel models
trainer = Trainer(
plugins=NLPDDPPlugin(),
devices=args.tensor_model_parallel_size * args.pipeline_model_parallel_size,
accelerator='gpu',
precision=args.precision,
)
app_state = AppState()
if args.tensor_model_parallel_size > 1 or args.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = args.tensor_model_parallel_size * args.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=args.tensor_model_parallel_size,
pipeline_model_parallel_size_=args.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=args.pipeline_model_parallel_split_rank,
)
model = MegaMolBARTModel.restore_from(
restore_path=args.model_file, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector(),
)
model.freeze()
request = {
"prompt": args.prompt,
"tokens_to_generate": args.tokens_to_generate,
}
dataset = MoleculeRequestDataset(request, model.tokenizer)
request_dl = DataLoader(dataset)
response = trainer.predict(model, request_dl)[0]
input_mol = response['prompt']
recon_mol = ''.join(response['completion']['text'])
print("***************************")
print(f"Reconstruction: {'PASS' if input_mol == recon_mol else 'FAIL'}")
print(f"input molecule: {input_mol}")
print(f"reconstructed molecule: {recon_mol}")
print("***************************")
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
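# Example invocation (a sketch; the checkpoint path and SMILES prompt below are placeholders,
# only flags defined by the ArgumentParser above are used):
#   python megamolbart_eval.py \
#       --model_file /path/to/megamolbart.nemo \
#       --prompt "c1ccccc1" \
#       --tokens_to_generate 64 \
#       --precision 16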
| MegaMolBART-dev | examples/chem/megamolbart_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelSummary
from pytorch_lightning.callbacks.timer import Timer
from pytorch_lightning.plugins.environments.torchelastic_environment import TorchElasticEnvironment
from pytorch_lightning.plugins.precision.native_amp import NativeMixedPrecisionPlugin
from pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector
from nemo.collections.nlp.parts.nlp_overrides import (
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPPlugin,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import StatelessTimer, exp_manager
from nemo_chem.models.megamolbart import MegaMolBARTModel
from nemo_chem.data import MoleculeCsvDatasetConfig
from nemo_chem.utils import recursive_make_dirs, update_dataclass_config
from nemo_chem.data import Preprocess, CsvToBinary
import os
def setup_trainer(cfg):
"""Trainer setup functions"""
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = [
NLPDDPPlugin(
no_ddp_communication_hook=True,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
]
if cfg.trainer.precision in [16, 'bf16']:
scaler = None
if cfg.trainer.precision == 16:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, **cfg.trainer, callbacks=[ModelSummary(max_depth=3)])
exp_manager(trainer, cfg.get("exp_manager", None))
# recursive_make_dirs(log_dir)
# recursive_make_dirs(trainer.checkpoint_callback.dirpath)
# update resume from checkpoint found by exp_manager
if cfg.model.resume_from_checkpoint is not None:
resume_from_checkpoint = cfg.model.resume_from_checkpoint
else:
resume_from_checkpoint = trainer._checkpoint_connector.resume_from_checkpoint_fit_path
logging.info(f'Resuming training from checkpoint: {resume_from_checkpoint}')
trainer._checkpoint_connector = CheckpointConnector(trainer, resume_from_checkpoint=resume_from_checkpoint)
# Override timer callback to a stateless one
for idx, callback in enumerate(trainer.callbacks):
if isinstance(callback, Timer):
trainer.callbacks[idx] = StatelessTimer(cfg.trainer.max_time,)
# hydra interpolation does not work here as the interpolation key is lost when PTL saves hparams
with open_dict(cfg):
cfg.model.precision = cfg.trainer.precision
return trainer
@hydra_runner(config_path="conf", config_name="megamolbart_pretrain_xsmall_span_aug")
def main(cfg) -> None:
with open_dict(cfg):
cfg.model.data = update_dataclass_config(cfg.model.data, MoleculeCsvDatasetConfig)
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
trainer = setup_trainer(cfg)
model = MegaMolBARTModel(cfg.model, trainer)
logging.info("************** Model parameters and their sizes ***********")
for name, param in model.named_parameters():
logging.info(f'{name}: {param.size()}')
logging.info("***********************************************************")
if cfg.do_training:
logging.info("************** Starting Training ***********")
trainer.fit(model)
logging.info("************** Finished Training ***********")
else:
logging.info("************** Starting Data PreProcessing ***********")
logging.info("Processing data into CSV files")
preprocess = Preprocess()
preprocess.prepare_dataset(links_file=cfg.model.data.links_file,
output_dir=cfg.model.data.dataset_path)
if cfg.model.data.dataset_format == "bin":
logging.info("Converting CSV data into Binary")
csvtobin = CsvToBinary(input_dir=cfg.model.data.dataset_path,
out_dir=cfg.model.data.dataset_path,
config=cfg,
num_enumerations=cfg.model.data.num_enumerations,
num_workers=cfg.model.data.num_workers)
csvtobin.prepare_dataset()
logging.info("************** Finished Data PreProcessing ***********")
if cfg.do_testing:
logging.info("************** Starting Testing ***********")
trainer.test(model)
logging.info("************** Finished Testing ***********")
if __name__ == '__main__':
main()
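# Example launches (sketches only; both keys shown are read by main() above, any other Hydra
# overrides are outside the scope of this file):
#   python megamolbart_pretrain.py do_training=True trainer.precision=16
#   python megamolbart_pretrain.py do_training=False   # data preprocessing path (CSV, optional binary)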
| MegaMolBART-dev | examples/chem/megamolbart_pretrain.py |
import grpc
import torch
import logging
from megamolbart_pb2_grpc import GenerativeSamplerStub
from megamolbart_pb2 import InputSpec
log = logging.getLogger(__name__)
class InferenceWrapper():
def __init__(self):
channel = grpc.insecure_channel('localhost:50051')
self.stub = GenerativeSamplerStub(channel)
def smis_to_embedding(self, smis):
spec = InputSpec(smis=smis)
resp = self.stub.SmilesToEmbedding(spec)
embeddings = torch.FloatTensor(list(resp.embeddings))
embeddings = torch.reshape(embeddings, tuple(resp.dim)).cuda()
return embeddings
def smis_to_hidden(self, smis):
spec = InputSpec(smis=smis)
resp = self.stub.SmilesToHidden(spec)
hidden_states = torch.FloatTensor(list(resp.hidden_states))
hidden_states = torch.reshape(hidden_states, tuple(resp.dim)).cuda()
masks = torch.BoolTensor(list(resp.masks))
masks = torch.reshape(masks, tuple(resp.dim[:2])).cuda()
return hidden_states, masks
def hidden_to_smis(self, hidden_states, masks):
dim = hidden_states.shape
spec = InputSpec(hidden_states=hidden_states.flatten().tolist(),
dim=dim,
masks=masks.flatten().tolist())
resp = self.stub.HiddenToSmis(spec)
return resp.smis
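# Minimal usage sketch (assumes a MegaMolBART gRPC service is already listening on localhost:50051;
# the SMILES string is an arbitrary example):
#   wrapper = InferenceWrapper()
#   hidden, masks = wrapper.smis_to_hidden(["CC(=O)Oc1ccccc1C(=O)O"])
#   smis = wrapper.hidden_to_smis(hidden, masks)
#   embedding = wrapper.smis_to_embedding(["CC(=O)Oc1ccccc1C(=O)O"])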
| MegaMolBART-dev | examples/chem/nbs/infer.py |
#!/usr/bin/python3
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Modules imported for unpack tool"""
import argparse
import hashlib
import json
import math
import os
import re
import stat
import sys
import time
import uuid
UNPACK_TOOL_VERSION = "4.0.6"
class Util:
"""
Class with static helper functions
"""
LOG_FILE = "./fwpkg_unpack_log.txt"
LOGFILE_PATH = ""
@staticmethod
def cli_log(log_msg, log_file_only=False):
"""
Append log message to cli log file
"""
log_file = Util.LOG_FILE
file_handle = None
try:
with open(log_file, "a+", encoding="utf-8") as file_handle:
localtime = time.asctime(time.localtime(time.time()))
file_handle.write(f"{localtime} : {log_msg}\n")
Util.LOGFILE_PATH = os.path.abspath(file_handle.name)
if log_file_only is False:
print(log_msg)
except PermissionError as _:
print(log_msg)
print(f"Error: Failed to open or create {log_file}")
@staticmethod
def get_descriptor_type_name(desc_type):
"""
Return the descriptive name for given integer descriptor type.
"""
desc_type_dict = {
0x0000: "PCI Vendor ID",
0x0001: "IANA Enterprise ID",
0x0002: "UUID",
0x0003: "PnP Vendor ID",
0x0004: "ACPI Vendor ID",
0x0005: "IEEE Assigned Company ID",
0x0006: "SCSI Vendor ID",
0x0100: "PCI Device ID",
0x0101: "PCI Subsystem Vendor ID",
0x0102: "PCI Subsystem ID",
0x0103: "PCI Revision ID",
0x0104: "PnP Product Identifier",
0x0105: "ACPI Product Identifier",
0x0106: "ASCII Model Number",
0x0107: "ASCII Model Number",
0x0108: "SCSI Product ID",
0x0109: "UBM Controller Device Code",
0xffff: "Vendor Defined",
}
name = desc_type_dict.get(desc_type, f'{desc_type:#x}')
return name
@staticmethod
def get_timestamp_str(timestamp):
"""
Return timestamp string from 13 byte binary data
according to PLDM Base specification
"""
year = timestamp[11]
year = year << 8
year = year | timestamp[10]
time_str = str(year) + "-"
time_str = time_str + str(timestamp[9])
time_str = time_str + "-" + str(timestamp[8])
time_str = time_str + " " + str(timestamp[7])
time_str = time_str + ":" + str(timestamp[6])
time_str = time_str + ":" + str(timestamp[5])
micro_sec = timestamp[4]
micro_sec = micro_sec << 8
micro_sec = micro_sec | timestamp[3]
micro_sec = micro_sec << 8
micro_sec = micro_sec | timestamp[2]
time_str = time_str + ":" + str(micro_sec)
        # Treat bytes 0-1 as a signed 16-bit UTC offset; the sign handling below is
        # otherwise dead code because an unsigned composition can never be negative.
        utc_offset = int.from_bytes(timestamp[0:2], byteorder='little', signed=True)
sign = "+"
if utc_offset < 0:
utc_offset = utc_offset * -1
sign = "-"
time_str = time_str + " " + sign + str(utc_offset)
return time_str
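    # Byte layout consumed above (13-byte timestamp, multi-byte fields little endian):
    #   bytes 0-1 UTC offset, bytes 2-4 microseconds, byte 5 seconds, byte 6 minutes,
    #   byte 7 hours, byte 8 day, byte 9 month, bytes 10-11 year; byte 12 is not used here.
    # For example, an input decoding to year=2022, month=3, day=14, 9:26:53 and zero
    # microseconds/offset is rendered as "2022-3-14 9:26:53:0 +0" (no zero padding is applied).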
@staticmethod
def get_checksum_for_component_image(fw_image):
"""
Compute SHA256 for the given component image.
"""
sha256 = ""
try:
with open(fw_image, 'rb') as file_name:
data = file_name.read()
sha256 = hashlib.sha256(data).hexdigest()
except (FileNotFoundError, IOError) as err:
log_msg = f'Error: {err}'
Util.cli_log(log_msg, False)
return sha256
@staticmethod
def get_padded_hex(byte_arr):
"""
Get hex formatted version of a byte array padded with 0
"""
total_len = len(byte_arr)
hex_str = hex(
int.from_bytes(byte_arr, byteorder='little', signed=False))[2:]
padded_str = '0x' + hex_str.zfill(total_len * 2)
return padded_str
class PLDMUnpack:
# pylint: disable=too-many-instance-attributes
"""
PLDMUnpack class implements a PLDM parser and the unpack tool
along with its required features.
...
Attributes
----------
package : str
Path/Name of the input firmware package
unpack : bool
True if tool should unpack firmware images
fwpkg_fd : io.TextIOWrapper
Instance used to read from package file
header_map : dict
Stores the PLDM Package Header Information parsed from given package
device_id_record_count : int
Number of PLDM FirmwareDeviceIDRecords found in given package
fd_id_record_list : list
List of FirmwareDeviceIDRecords parsed from given package
component_img_info_list : list
List of ComponentImageInformation parsed from given package
Methods
-------
parse_header() :
Parses PLDM Package Header Information
parse_device_id_records() :
Parses FirmwareDeviceIDRecords from package
parse_component_img_info() :
Parses ComponentImageInformation from package
get_image_name_from_records(comp_info_index) :
        Identify the records which contain metadata for image naming
get_image_name(comp_info_index) :
Get image name string by appending various metadata
create_unpacked_files(output_dir) :
Extract each firmware image in a file
unpack_pldm_package(package_name, output_dir) :
Perform complete parsing and extraction of package
get_applicable_component_index(applicable_component):
Return applicable_component as list of indices
get_ec_info(filepath) :
        Get all EC metadata from extracted firmware
get_ap_metadata(filepath) :
        Get all AP metadata from extracted firmware
get_signature_type(fw_image, component_identifier):
Get Signature type for given firmware image and component identifier
    is_glacier_device(record, device_name):
        Check whether the device is a Glacier (ERoT) device
get_formatted_descriptors(record_desc, components):
Method to prepare descriptor section for json output
prepare_records_json():
Prepares the JSON output.
"""
def __init__(self):
"""
        Constructor for the PLDMUnpack class
"""
self.unpack = True
self.package = ""
self.fwpkg_fd = 0
self.header_map = {}
self.device_id_record_count = 0
self.fd_id_record_list = []
self.component_img_info_list = []
self.full_header = {
"PackageHeaderInformation": {},
"FirmwareDeviceIdentificationArea": {},
"ComponentImageInformationArea": {},
"Package Header Checksum": ''
}
self.verbose = False
self.little_endian_list = [
"IANA Enterprise ID", "PCI Vendor ID", "PCI Device ID",
"PCI Subsystem Vendor ID", "PCI Subsystem ID"
]
def parse_header(self):
"""
Parse PLDM header data into self.header_map
Returns :
True if parsing successful
"""
# check if UUID is valid
pldm_fw_header_id_v1_0 = b'\xf0\x18\x87\x8c\xcb\x7d\x49\x43\x98\x00\xa0\x2f\x05\x9a\xca\x02'
uuid_v1_0 = str(uuid.UUID(bytes=pldm_fw_header_id_v1_0))
self.header_map["PackageHeaderIdentifier"] = str(
uuid.UUID(bytes=self.fwpkg_fd.read(16)))
if uuid_v1_0 != self.header_map["PackageHeaderIdentifier"]:
log_msg = "Expected PLDM v1.0 but PackageHeaderIdentifier is "\
+ self.header_map["PackageHeaderIdentifier"]
Util.cli_log(log_msg, False)
return False
self.header_map["PackageHeaderFormatRevision"] = str(
int.from_bytes(self.fwpkg_fd.read(1),
byteorder='little',
signed=False))
self.header_map["PackageHeaderSize"] = int.from_bytes(
self.fwpkg_fd.read(2), byteorder='little', signed=False)
timestamp = self.fwpkg_fd.read(13)
self.header_map["PackageReleaseDateTime"] = Util.get_timestamp_str(
timestamp)
self.header_map["ComponentBitmapBitLength"] = int.from_bytes(
self.fwpkg_fd.read(2), byteorder='little', signed=False)
self.header_map["PackageVersionStringType"] = int.from_bytes(
self.fwpkg_fd.read(1), byteorder='little', signed=False)
version_str_len = int.from_bytes(self.fwpkg_fd.read(1),
byteorder='little',
signed=False)
self.header_map["PackageVersionStringLength"] = version_str_len
self.header_map["PackageVersionString"] = self.fwpkg_fd.read(
version_str_len).decode('utf-8')
self.full_header["PackageHeaderInformation"] = self.header_map
return True
def parse_device_id_records(self):
"""
Parse PLDM FirmwareDeviceIDRecords data into self.fd_id_record_list
Returns:
True if parsing is successful
"""
# pylint: disable=line-too-long
self.device_id_record_count = int.from_bytes(self.fwpkg_fd.read(1),
byteorder='little',
signed=False)
for _ in range(self.device_id_record_count):
id_record_map = {}
id_record_map["RecordLength"] = int.from_bytes(
self.fwpkg_fd.read(2), byteorder='little', signed=False)
id_record_map["DescriptorCount"] = int.from_bytes(
self.fwpkg_fd.read(1), byteorder='little', signed=False)
id_record_map["DeviceUpdateOptionFlags"] = int.from_bytes(
self.fwpkg_fd.read(4), byteorder='little', signed=False)
id_record_map[
"ComponentImageSetVersionStringType"] = int.from_bytes(
self.fwpkg_fd.read(1), byteorder='little', signed=False)
id_record_map[
"ComponentImageSetVersionStringLength"] = int.from_bytes(
self.fwpkg_fd.read(1), byteorder='little', signed=False)
id_record_map["FirmwareDevicePackageDataLength"] = int.from_bytes(
self.fwpkg_fd.read(2), byteorder='little', signed=False)
applicable_component_size = math.ceil(
self.header_map["ComponentBitmapBitLength"] / 8)
id_record_map["ApplicableComponents"] = int.from_bytes(
self.fwpkg_fd.read(applicable_component_size),
byteorder='little',
signed=False)
id_record_map[
"ComponentImageSetVersionString"] = self.fwpkg_fd.read(
id_record_map["ComponentImageSetVersionStringLength"]
).decode('utf-8')
descriptors = []
for j in range(id_record_map["DescriptorCount"]):
descriptor_map = {}
if j == 0:
descriptor_map["InitialDescriptorType"] = int.from_bytes(
self.fwpkg_fd.read(2),
byteorder='little',
signed=False)
descriptor_map["InitialDescriptorLength"] = int.from_bytes(
self.fwpkg_fd.read(2),
byteorder='little',
signed=False)
value = self.fwpkg_fd.read(
descriptor_map["InitialDescriptorLength"])
descriptor_map["InitialDescriptorData"] = value
else:
descriptor_map[
"AdditionalDescriptorType"] = int.from_bytes(
self.fwpkg_fd.read(2),
byteorder='little',
signed=False)
descriptor_map[
"AdditionalDescriptorLength"] = int.from_bytes(
self.fwpkg_fd.read(2),
byteorder='little',
signed=False)
if descriptor_map["AdditionalDescriptorType"] == 0xFFFF:
descriptor_map[
"VendorDefinedDescriptorTitleStringType"] = int.from_bytes(
self.fwpkg_fd.read(1),
byteorder='little',
signed=False)
descriptor_map[
"VendorDefinedDescriptorTitleStringLength"] = int.from_bytes(
self.fwpkg_fd.read(1),
byteorder='little',
signed=False)
descriptor_map[
"VendorDefinedDescriptorTitleString"] = self.fwpkg_fd.read(
descriptor_map[
"VendorDefinedDescriptorTitleStringLength"]
).decode('utf-8')
vendor_def_data_len = (
descriptor_map["AdditionalDescriptorLength"] -
(2 + descriptor_map[
"VendorDefinedDescriptorTitleStringLength"]))
descriptor_map[
"VendorDefinedDescriptorData"] = self.fwpkg_fd.read(
vendor_def_data_len).hex()
else:
descriptor_map[
"AdditionalDescriptorIdentifierData"] = self.fwpkg_fd.read(
descriptor_map["AdditionalDescriptorLength"])
descriptors.append(descriptor_map)
id_record_map["RecordDescriptors"] = descriptors
id_record_map["FirmwareDevicePackageData"] = self.fwpkg_fd.read(
id_record_map["FirmwareDevicePackageDataLength"]).decode(
'utf-8')
self.fd_id_record_list.append(id_record_map)
self.full_header["FirmwareDeviceIdentificationArea"] = {
"DeviceIDRecordCount": self.device_id_record_count,
"FirmwareDeviceIDRecords": self.fd_id_record_list
}
return True
def parse_component_img_info(self):
"""
Parse PLDM Component Image info data into self.fd_id_record_list
Returns :
True if parsing successful
"""
component_image_count = int.from_bytes(self.fwpkg_fd.read(2),
byteorder='little',
signed=False)
for _ in range(component_image_count):
comp_info = {}
comp_info["ComponentClassification"] = int.from_bytes(
self.fwpkg_fd.read(2), byteorder='little', signed=False)
comp_info["ComponentIdentifier"] = hex(
int.from_bytes(self.fwpkg_fd.read(2),
byteorder='little',
signed=False))
comp_info["ComponentComparisonStamp"] = int.from_bytes(
self.fwpkg_fd.read(4), byteorder='little', signed=False)
comp_info["ComponentOptions"] = int.from_bytes(
self.fwpkg_fd.read(2), byteorder='little', signed=False)
comp_info["RequestedComponentActivationMethod"] = int.from_bytes(
self.fwpkg_fd.read(2), byteorder='little', signed=False)
# RequestedComponentActivationMethod can have any combination of bits 0:5 set
# Any value above 0x3F is invalid
activation_val = comp_info["RequestedComponentActivationMethod"]
if activation_val > 0x3F:
Util.cli_log(
f"Found invalid value for RequestedComponentActivationMethod={activation_val}",
True)
comp_info["ComponentLocationOffset"] = int.from_bytes(
self.fwpkg_fd.read(4), byteorder='little', signed=False)
comp_info["ComponentSize"] = int.from_bytes(self.fwpkg_fd.read(4),
byteorder='little',
signed=False)
comp_info["ComponentVersionStringType"] = int.from_bytes(
self.fwpkg_fd.read(1), byteorder='little', signed=False)
comp_info["ComponentVersionStringLength"] = int.from_bytes(
self.fwpkg_fd.read(1), byteorder='little', signed=False)
comp_info["ComponentVersionString"] = self.fwpkg_fd.read(
comp_info["ComponentVersionStringLength"]).decode('utf-8')
self.component_img_info_list.append(comp_info)
self.full_header["ComponentImageInformationArea"] = {
"ComponentImageCount": component_image_count,
"ComponentImageInformation": self.component_img_info_list
}
return True
def get_image_name_from_records(self, comp_info_index):
"""
        Identify the record which contains metadata for the image at
        index comp_info_index of the component image info list
Parameter:
comp_info_index index of image in component image
info section
Returns:
Name of the applicable record for given image
or "" if nothing found
"""
mask = 1 << comp_info_index
for rec in self.fd_id_record_list:
applicable_comp_indices = rec["ApplicableComponents"]
name = rec["ComponentImageSetVersionString"]
if mask & applicable_comp_indices == mask:
if name.find(",") == -1:
return name, rec['RecordDescriptors']
components = name.split(",")
applicable_comp = applicable_comp_indices
count = 0
for _ in range(comp_info_index + 1):
if applicable_comp & 1 == 1:
count = count + 1
applicable_comp = applicable_comp >> 1
return components[count - 1], rec['RecordDescriptors']
return "", None
def get_image_name(self, comp_info_index):
"""
Create the image name string by appending various metadata
separated by '_'
Parameter:
            comp_info_index    index of the image in the component image
                               info list, used for naming
Returns:
Name of the image for unpacking
or ""
"""
comp_info = self.component_img_info_list[comp_info_index]
name, _ = self.get_image_name_from_records(comp_info_index)
if name != "":
name = name.replace(":", "_")
name = name.replace("_N/A", "")
name = name + "_" + comp_info["ComponentVersionString"]
if name.startswith("FW-Package"):
name = name + ".fwpkg"
else:
name = name + "_image.bin"
name = re.sub("_+", "_", name)
return name
def create_unpacked_files(self, output_dir):
"""
Extract each firmware image from the
Firmware Package Payload section of the input file.
Parameter:
output_dir path of the directory to store the
extracted files
Returns:
True if unpacking was successful
"""
package_size = os.path.getsize(self.package)
for index, info in enumerate(self.component_img_info_list):
offset = info["ComponentLocationOffset"]
size = info["ComponentSize"]
if offset + size > package_size:
log_msg = f"Error: ComponentLocationOffset {offset} + \
ComponentSize {size} exceeds given package size {package_size}"
Util.cli_log(log_msg, False)
return False
            img_name = self.get_image_name(index)
            if img_name == "":
                log_msg = "Error: The input firmware package does not conform to \
                    the format created by NVIDIA packaging tool."
                Util.cli_log(log_msg, False)
                return False
            img_name = output_dir + img_name
try:
if os.path.exists(img_name):
os.remove(img_name)
with open(img_name, "w+b") as component_img_fd:
self.fwpkg_fd.seek(offset, 0)
bytes_left = size
buffer_size = 2048
while bytes_left > 0:
if bytes_left < 2048:
buffer_size = bytes_left
buffer = self.fwpkg_fd.read(buffer_size)
component_img_fd.write(buffer)
bytes_left = bytes_left - buffer_size
info["FWImageName"] = img_name
if os.path.exists(img_name):
os.chmod(img_name,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
except OSError as err:
log_msg = f"Error: Could not create file {img_name} due to {err}"
Util.cli_log(log_msg, False)
return False
return True
def get_pldm_header_checksum(self):
""" Read PLDM header checksum """
self.full_header['Package Header Checksum'] = int.from_bytes(
self.fwpkg_fd.read(4), byteorder='little', signed=False)
def unpack_pldm_package(self, package_name, output_dir):
"""
Parse the PLDM package and get information about components included in the FW image.
Unpack the package if required.
Parameters:
package_name filepath of input package file
output_dir directory to store the resulting unpacked files
Returns:
True if parsing and unpacking was successful
"""
if package_name == "" or package_name is None:
log_msg = "ERROR: Firmware package file is mandatory."
Util.cli_log(log_msg, False)
return False
if os.path.exists(package_name) is False:
log_msg = print("ERROR: File does not exist at path ",
package_name)
Util.cli_log(log_msg, False)
return False
self.package = package_name
try:
with open(self.package, "rb") as self.fwpkg_fd:
parsing_valid = self.parse_header()
if parsing_valid:
parsing_valid = self.parse_device_id_records()
if parsing_valid:
parsing_valid = self.parse_component_img_info()
self.get_pldm_header_checksum()
if parsing_valid and self.unpack:
if output_dir == "" or output_dir is None:
# If outdir was not given in command
# assume current directory
output_dir = "."
output_dir = os.path.abspath(output_dir) + "/"
# If dir doesn't exist, create it
if os.path.isdir(output_dir) is False:
os.makedirs(output_dir)
parsing_valid = self.create_unpacked_files(output_dir)
if self.verbose:
log_message = f"PLDM Output directory: {output_dir}, \
Package name: {package_name}"
Util.cli_log(log_message, True)
if parsing_valid is False:
log_message = "Package Header Contents:\
" + str(self.header_map)
Util.cli_log(log_message, True)
log_message = "FirmwareDeviceIDRecords Contents:\
" + str(self.fd_id_record_list)
Util.cli_log(log_message, True)
log_message = "ComponentImageInformation Contents:\
" + str(self.component_img_info_list)
Util.cli_log(log_message, True)
return parsing_valid
except IOError as e_io_error:
log_message = f"Couldn't open or read given FW package ({e_io_error})"
Util.cli_log(log_message, False)
return False
def get_applicable_component_index(self, applicable_component):
"""
Return list of indices of applicable component images from
applicable_component index bitmap.
"""
# number of images in the image section
max_bits = len(self.component_img_info_list)
indices = []
for shift in range(max_bits):
# for each index check if the bit at that position is set in applicable_component
mask = 1 << shift
result = applicable_component & mask
if result == mask:
indices.append(shift)
return indices
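    # For example (illustrative only): with three images in the package, an
    # ApplicableComponents bitmap of 0b101 yields [0, 2] -- the first and third images apply.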
def get_signature_type(self, fw_image, component_identifier):
""" Method to tell if unpacked bin is prod signed or debug signed """
return 'N/A'
@staticmethod
def is_glacier_device(record, device_name):
"""
        Check whether the device is a Glacier (ERoT) device
"""
if device_name.startswith("ERoT"):
return True
if record["DescriptorCount"] == 0:
return False
record_desc = record["RecordDescriptors"]
for desc in record_desc:
descriptor_type = desc.get("AdditionalDescriptorType", "")
if descriptor_type == 65535:
title = desc.get("VendorDefinedDescriptorTitleString", "")
if title == "GLACIERDSD":
return True
return False
def get_applicable_components_names(self, record):
# pylint: disable=too-many-branches
"""
Method to create list of applicable component images and their metadata like
ComponentIdentifier and Version. FWImage is included if unpacking was done.
Also prepares ComponentImageSetVersionString in name:model:vendor,... format if
it is not already so.
"""
index = self.get_applicable_component_index(
record["ApplicableComponents"])
components = []
device_name = record["ComponentImageSetVersionString"]
for i in index:
component = {}
img = self.component_img_info_list[i]
if self.unpack is True:
component = {
"ComponentIdentifier": "",
"ComponentVersionString": "",
"FWImage": ""
}
component["FWImage"] = img["FWImageName"]
component[
"FWImageSHA256"] = Util.get_checksum_for_component_image(
component["FWImage"])
# For ERoT associated devices get signature type
if self.is_glacier_device(
record, component["FWImage"].rsplit('/', 1)[-1]):
signature_type = self.get_signature_type(
component["FWImage"], img["ComponentIdentifier"])
if signature_type:
component["SignatureType"] = signature_type
else:
component["SignatureType"] = "N/A"
component["FWImageSize"] = img["ComponentSize"]
else:
component = {
"ComponentIdentifier": "",
"ComponentVersionString": ""
}
component["ComponentIdentifier"] = img["ComponentIdentifier"]
component["ComponentVersionString"] = img["ComponentVersionString"]
components.append(component)
if not self.unpack:
ap_sku, ec_sku = 'N/A', 'N/A'
records = record["RecordDescriptors"]
for i in range(1, len(records)):
if records[i]["AdditionalDescriptorType"] == 65535:
if records[i][
"VendorDefinedDescriptorTitleString"] == "APSKU":
ap_sku = "0x" + records[i][
"VendorDefinedDescriptorData"]
elif records[i][
"VendorDefinedDescriptorTitleString"] == "ECSKU":
ec_sku = "0x" + records[i][
"VendorDefinedDescriptorData"]
for component in components:
if component.get("ComponentIdentifier") == "0xff00":
component["ECSKUID"] = ec_sku
else:
component["APSKUID"] = ap_sku
return components, device_name
def decode_descriptor_data(self, desc_type_name, desc_data):
""" Formatting for descriptor data based on endianess"""
desc_val = ""
if desc_type_name in self.little_endian_list:
desc_val = Util.get_padded_hex(desc_data)
else:
desc_val = "0x" + desc_data.hex()
return desc_val
def get_formatted_descriptors(self, record_desc, components):
"""
Method to prepare stripped and formatted descriptor section for json output.
"""
records = record_desc["RecordDescriptors"]
descriptors = []
desc = {}
if len(records) == 0:
return descriptors
desc["InitialDescriptorType"] = Util.get_descriptor_type_name(
records[0]["InitialDescriptorType"])
desc["InitialDescriptorData"] = self.decode_descriptor_data(
desc["InitialDescriptorType"], records[0]["InitialDescriptorData"])
descriptors.append(desc)
for i in range(1, len(records)):
desc = {}
desc["AdditionalDescriptorType"] = Util.get_descriptor_type_name(
records[i]["AdditionalDescriptorType"])
if records[i]["AdditionalDescriptorType"] == 65535:
desc["VendorDefinedDescriptorTitleString"] = records[i][
"VendorDefinedDescriptorTitleString"]
desc_data = records[i]["VendorDefinedDescriptorData"]
desc["VendorDefinedDescriptorData"] = '0x' + str(desc_data)
if desc["VendorDefinedDescriptorTitleString"] == "APSKU":
# AP SKU on Retimer is just vendor id, not a real AP SKU ID. So skip
if "FWImage" in components[-1] and \
not "PCIeRetimer" in components[-1]["FWImage"]:
bin_ary = bytearray.fromhex(
desc_data[:-2]) # First byte is strap id
bin_ary.reverse()
ap_sku_id = ''.join(format(x, '02x') for x in bin_ary)
components[-1]["AP_SKU_ID"] = "0x" + ap_sku_id
desc["VendorDefinedDescriptorData"] = components[-1][
"AP_SKU_ID"]
else:
desc["AdditionalDescriptorData"] = self.decode_descriptor_data(
desc["AdditionalDescriptorType"],
records[i]["AdditionalDescriptorIdentifierData"])
descriptors.append(desc)
return descriptors
def get_full_metadata_json(self):
""" Decode byte value descriptors for full package metadata command """
for device_records in self.full_header[
'FirmwareDeviceIdentificationArea']['FirmwareDeviceIDRecords']:
device_records[
'ApplicableComponents'] = self.get_applicable_component_index(
device_records['ApplicableComponents'])
records = device_records["RecordDescriptors"]
descriptors = []
if len(records) == 0:
continue
desc = records[0]
desc["InitialDescriptorType"] = Util.get_descriptor_type_name(
records[0]["InitialDescriptorType"])
desc["InitialDescriptorData"] = self.decode_descriptor_data(
desc["InitialDescriptorType"], desc["InitialDescriptorData"])
descriptors.append(desc)
for i in range(1, len(records)):
desc = records[i]
desc[
"AdditionalDescriptorType"] = Util.get_descriptor_type_name(
records[i]["AdditionalDescriptorType"])
if desc["AdditionalDescriptorType"] == 'Vendor Defined':
desc["VendorDefinedDescriptorTitleString"] = records[i][
"VendorDefinedDescriptorTitleString"]
desc_data = records[i]["VendorDefinedDescriptorData"]
desc["VendorDefinedDescriptorData"] = '0x' + str(desc_data)
else:
desc[
"AdditionalDescriptorIdentifierData"] = self.decode_descriptor_data(
desc["AdditionalDescriptorType"],
desc["AdditionalDescriptorIdentifierData"])
descriptors.append(desc)
device_records["RecordDescriptors"] = descriptors
def prepare_records_json(self):
# pylint: disable=line-too-long
"""
Prepares the JSON output for the tool.
"""
package_json = {
"PackageHeaderInformation": {},
"FirmwareDeviceRecords": []
}
package_json["PackageHeaderInformation"]["PackageHeaderIdentifier"] = (
self.header_map["PackageHeaderIdentifier"])
package_json["PackageHeaderInformation"][
"PackageHeaderFormatRevision"] = (
self.header_map["PackageHeaderFormatRevision"])
if package_json["PackageHeaderInformation"][
"PackageHeaderFormatRevision"] != "1":
return False, "The input firmware package version does not conform \
to the format created by NVIDIA packaging tool."
package_json["PackageHeaderInformation"]["PackageReleaseDateTime"] = (
self.header_map["PackageReleaseDateTime"])
package_json["PackageHeaderInformation"]["PackageVersionString"] = (
self.header_map["PackageVersionString"])
package_json['PackageHeaderInformation']["PackageSHA256"] = (
Util.get_checksum_for_component_image(self.package))
recordlist = []
for record in self.fd_id_record_list:
rec = {
"ComponentImageSetVersionString": "",
"DeviceDescriptors": [],
"Components": []
}
components, name = self.get_applicable_components_names(record)
if not components or not name:
return False, "The input firmware package does not conform to \
the format created by NVIDIA packaging tool."
rec["DeviceDescriptors"] = self.get_formatted_descriptors(
record, components)
rec["Components"] = components
rec["ComponentImageSetVersionString"] = name
recordlist.append(rec)
package_json["FirmwareDeviceRecords"] = recordlist
json_string = json.dumps(package_json, indent=4)
return True, json_string
def main():
"""
    Call the unpack parser and prepare the output JSON
"""
arg_parser = argparse.ArgumentParser(prog='fwpkg-unpack',
description="\
NVIDIA fwpkg-unpack v{UNPACK_TOOL_VERSION} The firmware package unpack tool performs parsing of\
the firmware package and unpacking. The unpacker will extract all firmware\
images from the package and create bin files for each.",
allow_abbrev=False)
arg_parser.add_argument(
"file", help="Provide firmware package filename to unpack.", nargs='?')
arg_group = arg_parser.add_mutually_exclusive_group(required=True)
arg_group.add_argument(
"--unpack",
action='store_true',
help="Unpack the firmware package and extract all component images.")
arg_group.add_argument(
"--show_pkg_content",
action='store_true',
help=
"Provide package content description without extracting firmware images."
)
arg_group.add_argument(
"--show_all_metadata",
action='store_true',
help=
"Provide all PLDM metadata in package without extracting firmware images."
)
arg_parser.add_argument(
"--outdir",
help=
"Provide path to the directory where unpacked FW files will be stored. \
This option is used along with --unpack. \
If this option not specified with --unpack, current directory is assumed as outdir. \
Creates the directory at a given path if it does not exist.")
arg_group.add_argument("--version",
action='store_true',
help="Show tool version.")
arg_parser.add_argument(
"--verbose",
action='store_true',
help=
"Verbose Mode, This option is used along with --unpack or --show_pkg_content. \
By using this command, debug prints from the code will be copied in a debug \
logfile created in the same directory with name fwpkg_unpack_log.txt from\
unpack tool.")
tool_args = arg_parser.parse_args()
pldm_parser = PLDMUnpack()
pldm_parser.unpack = tool_args.unpack
if tool_args.show_pkg_content is True:
pldm_parser.unpack = False
if tool_args.version is True:
print(f"NVIDIA fwpkg-unpack - version {UNPACK_TOOL_VERSION}")
sys.exit(0)
else:
parser_status = pldm_parser.unpack_pldm_package(
tool_args.file, tool_args.outdir)
if parser_status is True:
json_output = {}
if tool_args.show_all_metadata is False:
parser_status, json_output = pldm_parser.prepare_records_json()
if not parser_status:
print("Status : Failed to prepare JSON records")
print("Path for LogFile ", Util.LOGFILE_PATH)
else:
pldm_parser.get_full_metadata_json()
json_output = json.dumps(pldm_parser.full_header,
sort_keys=False,
indent=4)
print(json_output)
sys.exit(0)
else:
print("Status : Failed")
print("Path for LogFile ", Util.LOGFILE_PATH)
sys.exit(1)
if __name__ == "__main__":
main()
| PLDM-unpack-main | fwpkg_unpack.py |
#!/usr/bin/python3
"""
Auto unit test vector generator. Looks in the GENERATORS_DIR directory for Python generator scripts and
runs them to generate test vectors. Writes out a manifest file of generator names and MD5 hashes so that
vectors whose generator script has not changed are not regenerated.
"""
import os
import subprocess
import hashlib
import csv
import pathlib
GENERATORS_DIR_REL = "generators/"
GENERATED_DIR = "generated/"
MANIFEST_FILE_REL = "manifest.txt"
cur_path = str(pathlib.Path(__file__).parent.absolute())
cwd = os.getcwd()
abs_matx = cur_path[:cur_path.find('/matx/') + len('/matx/')]
GENERATORS_DIR = f"{cur_path}/{GENERATORS_DIR_REL}"
MANIFEST_FILE = f"{cwd}/{MANIFEST_FILE_REL}"
print("Running test vector pre-flight check script", flush=True)
manifest = {}
if not os.path.isdir(GENERATED_DIR):
os.mkdir(GENERATED_DIR)
try:
with open(MANIFEST_FILE) as ml:
lines = ml.readlines()
for line in lines:
line = line.split(',')
manifest[line[0].strip()] = line[1].strip()
except FileNotFoundError:
print('No test vectors generated yet. Regenerating all...', flush=True)
for _, _, files in os.walk(GENERATORS_DIR, topdown=False):
for f in files:
if f[-3:] != '.py' or f == 'matx_common.py':
continue
        # Hash the generator script; use a context manager and avoid shadowing the built-in hash()
        with open(GENERATORS_DIR + f, 'rb') as gen_file:
            gen_hash = hashlib.md5(gen_file.read()).hexdigest().strip()
        if f not in manifest or manifest[f] != gen_hash:
            print(f"Regenerating {f}", flush=True)
            try:
                p = subprocess.check_output(GENERATORS_DIR + f, cwd=GENERATED_DIR)
                manifest[f] = gen_hash
except subprocess.CalledProcessError as ec:
print(f"Calling script {f} failed with error code {ec.returncode}: {ec.output}", flush=True)
m = open(MANIFEST_FILE, "w")
for k, v in manifest.items():
m.write(f"{k},{v}\n") | MatX-main | test/test_vectors/GenerateTestVectors.py |
#!/usr/bin/env python3
import numpy as np
from scipy import signal
from scipy import io
from numpy import random
import math
import matx_common
from typing import Dict, List
class mvdr_beamformer:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
np.random.seed(1234)
def run(self):
data_len = self.size[0]
num_beams = self.size[1]
num_el = self.size[2]
v = np.random.randn(num_el, num_beams) + \
np.random.randn(num_el, num_beams)*1j
vh = v.conj().T
in_vec = np.random.randn(num_el, data_len) + \
np.random.randn(num_el, data_len)*1j
out_cbf = np.matmul(vh, in_vec)
snap_len = 2 * num_el
load_coeff = 0.1
inv_slice = in_vec[:, 0:snap_len]
cov_mat = np.matmul(inv_slice, inv_slice.conj().T) / \
snap_len + load_coeff * np.eye(num_el)
cov_inv = np.linalg.inv(cov_mat)
return {
'cov_inv': cov_inv,
'cov_mat': cov_mat,
'in_vec': in_vec,
'v': v,
'out_cbf': out_cbf
}
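# The vectors returned by run() cover both stages the corresponding MatX test exercises: 'out_cbf' is
# the conventional beamformer output (v^H @ in_vec), while 'cov_mat' and 'cov_inv' are the diagonally
# loaded sample covariance built from the first 2 * num_el snapshots and its inverse.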
| MatX-main | test/test_vectors/generators/mvdr_beamformer.py |
#!/usr/bin/env python3
import numpy as np
from scipy import linalg as slinalg
from numpy import random
import math
import matx_common
from typing import Dict, List
class inv:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
np.random.seed(1234)
def run(self):
n = self.size[0]
batches = self.size[1]
# Create a positive-definite matrix
if batches > 1:
A = matx_common.randn_ndarray((batches, n,n), self.dtype)
else:
A = matx_common.randn_ndarray((n,n), self.dtype)
A_inv = np.linalg.inv(A)
return {
'A': A,
'A_inv': A_inv
}
class cholesky:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
np.random.seed(1234)
def run(self):
n = self.size[0]
A = np.random.randn(n, n)
B = np.matmul(A, A.conj().T)
B = B + n*np.eye(n)
L = np.linalg.cholesky(B)
return {
'B': B,
'L': L
}
class lu:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
np.random.seed(1234)
def run(self):
m, n = self.size[0], self.size[1]
A = np.random.randn(m, n)
P, L, U = slinalg.lu(A)
return {
'A': A,
'P': P,
'L': L,
'U': U,
}
class qr:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
np.random.seed(1234)
def run(self):
m, n = self.size[0], self.size[1]
A = np.random.randn(m, n)
Q, R = np.linalg.qr(A)
return {
'A': A,
'Q': Q,
'R': R,
}
class svd:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
np.random.seed(1234)
def run(self):
m, n = self.size[0], self.size[1]
A = matx_common.randn_ndarray((m,n), self.dtype)
U, S, V = np.linalg.svd(A)
return {
'A': A,
'U': U,
'S': S,
'V': V
}
class eig:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
np.random.seed(1234)
def run(self):
n = self.size[0]
# Create a positive-definite matrix
A = np.random.randn(n, n)
B = np.matmul(A, A.conj().T)
B = B + n*np.eye(n)
W, V = np.linalg.eig(B)
return {
'B': B,
'W': W,
'V': V
}
class det:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
np.random.seed(1234)
def run(self):
n = self.size[0]
# Create a positive-definite matrix
A = np.random.randn(n, n)
det = np.linalg.det(A)
return {
'A': A,
'det': det
}
| MatX-main | test/test_vectors/generators/00_solver.py |
#!/usr/bin/env python3
import numpy as np
import scipy.signal as ss
from typing import Dict, List
class kron_operator:
def __init__(self, dtype: str, size: List[int]):
pass
def run(self) -> Dict[str, np.array]:
b = np.array([[1, -1], [-1, 1]])
self.square = np.kron(np.eye(4), b)
a = np.array([[1, 2, 3], [4, 5, 6]])
self.rect = np.kron(a, np.ones([2, 2]))
return {
'square': self.square,
'rect': self.rect
}
class meshgrid_operator:
def __init__(self, dtype: str, size: List[int]):
self.size = size
def run(self) -> Dict[str, np.array]:
self.x = np.linspace(1, self.size[0], self.size[0])
self.y = np.linspace(1, self.size[1], self.size[1])
[X, Y] = np.meshgrid(self.x, self.y)
return {
'X': X,
'Y': Y
}
class window:
def __init__(self, dtype: str, size: List[int]):
self.win_size = size[0]
def run(self) -> Dict[str, np.array]:
self.hamming = np.hamming(self.win_size)
self.hanning = np.hanning(self.win_size)
self.blackman = np.blackman(self.win_size)
self.bartlett = np.bartlett(self.win_size)
self.flattop = ss.flattop(self.win_size)
return {
'hamming': self.hamming,
'hanning': self.hanning,
'blackman': self.blackman,
'bartlett': self.bartlett,
'flattop': self.flattop
}
class stats:
def __init__(self, dtype: str, size: List[int]):
self.size = size
def run(self) -> Dict[str, np.array]:
x = np.random.rand(self.size[0])
var_ub = np.var(x)
var_ml = np.var(x, ddof = 0)
std = np.std(x)
return {
'x': x,
'var_ub': var_ub,
'var_ml': var_ml,
'std': std
}
class contraction:
def __init__(self, dtype: str, size: List[int]):
pass
def run(self) -> Dict[str, np.array]:
a1 = np.arange(60.).reshape(3,4,5)
b1 = np.arange(24.).reshape(4,3,2)
c1 = np.einsum('ijk,jil->kl', a1, b1)
return {
'a_float3d': a1,
'b_float3d': b1,
'c_float3d': c1
} | MatX-main | test/test_vectors/generators/00_operators.py |
#!/usr/bin/env python3
import numpy as np
from scipy import fft as sf
from scipy import signal as ss
from numpy import random
from typing import Dict, List
class dct:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
def run(self):
N = self.size[0]
# Create a positive-definite matrix
x = np.random.randn(N,)
Y = sf.dct(x)
return {
'x': x,
'Y': Y
}
class chirp:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
def run(self):
N = self.size[0]
end = self.size[1]
f0 = self.size[2]
f1 = self.size[3]
t = np.linspace(0, end, N)
Y = ss.chirp(t, f0, t[-1], f1, 'linear')
return {
'Y': Y
}
class fftfreq:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
def run(self):
N = self.size[0]
F1 = sf.fftfreq(N)
F2 = sf.fftfreq(N+1)
F3 = sf.fftfreq(N, 0.5)
return {
'F1':F1,
'F2':F2,
'F3':F3,
}
| MatX-main | test/test_vectors/generators/01_signal.py |
#!/usr/bin/env python3
import numpy as np
import sys
from scipy import special
from scipy.constants import c, pi
import matx_common
from typing import Dict, List
class softmax:
def __init__(self, dtype: str, size: List[int]):
np.random.seed(1234)
self.t1 = matx_common.randn_ndarray((size[-1],), dtype)
self.t3 = matx_common.randn_ndarray((size[0], size[1], size[2]), dtype)
self.res = {
't1': self.t1,
't3': self.t3
}
def run(self):
self.res['t1'] = self.t1
self.res['t3'] = self.t3
self.res['t1_sm'] = special.softmax(self.t1)
self.res['t3_sm_axis2'] = special.softmax(self.t3, axis=2)
return self.res
class percentile:
def __init__(self, dtype: str, size: List[int]):
np.random.seed(1234)
self.t1e = matx_common.randn_ndarray((size[0],), dtype)
self.t1o = matx_common.randn_ndarray((size[0] + 1,), dtype)
self.res = {
't1e': self.t1e,
't1o': self.t1o
}
def run(self):
self.res['t1e_linear50'] = np.percentile(self.t1e, 50, interpolation='linear')
self.res['t1e_linear80'] = np.percentile(self.t1e, 80, interpolation='linear')
self.res['t1e_lower50'] = np.percentile(self.t1e, 50, interpolation='lower')
self.res['t1e_lower80'] = np.percentile(self.t1e, 80, interpolation='lower')
self.res['t1e_higher50'] = np.percentile(self.t1e, 50, interpolation='higher')
self.res['t1e_higher80'] = np.percentile(self.t1e, 80, interpolation='higher')
self.res['t1o_linear50'] = np.percentile(self.t1o, 50, interpolation='linear')
self.res['t1o_linear80'] = np.percentile(self.t1o, 80, interpolation='linear')
self.res['t1o_lower50'] = np.percentile(self.t1o, 50, interpolation='lower')
self.res['t1o_lower80'] = np.percentile(self.t1o, 80, interpolation='lower')
self.res['t1o_higher50'] = np.percentile(self.t1o, 50, interpolation='higher')
self.res['t1o_higher80'] = np.percentile(self.t1o, 80, interpolation='higher')
return self.res
| MatX-main | test/test_vectors/generators/00_reductions.py |
#!/usr/bin/env python3
import numpy as np
from scipy import signal
from scipy import io
from numpy import random
import math
import os
import matx_common
import cupy as cp
from typing import Dict, List
class simple_radar_pipeline:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
np.random.seed(1234)
self.num_channels = 1
def set_channels(self, nc: int):
self.num_channels = nc
def run(self):
num_pulses = self.size[0]
num_uncompressed_range_bins = self.size[1]
waveform_length = self.size[2]
num_compressed_range_bins = num_uncompressed_range_bins - waveform_length + 1
NDfft = 256
#num_channels = 16
res = {}
x = matx_common.randn_ndarray(
(num_pulses, num_uncompressed_range_bins), self.dtype)
res['x_init'] = x.copy()
waveform = matx_common.randn_ndarray((waveform_length,), self.dtype)
res['waveform'] = waveform.copy()
window = signal.hamming(waveform_length)
waveform_windowed = waveform * window
res['waveform_windowed'] = waveform_windowed.copy()
waveform_windowed_norm = waveform_windowed / \
np.linalg.norm(waveform_windowed)
res['waveform_windowed_norm'] = waveform_windowed_norm.copy()
Nfft = 2**math.ceil(
math.log2(np.max([num_uncompressed_range_bins, waveform_length])))
W = np.conj(np.fft.fft(waveform_windowed_norm, Nfft))
res['W'] = W.copy()
X = np.fft.fft(x, Nfft, 1)
res['X'] = X.copy()
for pulse in range(num_pulses):
y = np.fft.ifft(np.multiply(X[pulse, :], W), Nfft, 0)
x[pulse, 0:num_compressed_range_bins] = y[0:num_compressed_range_bins]
x_compressed = x[:, 0:num_compressed_range_bins]
if self.num_channels > 1:
x_compressed_stack = np.stack([x_compressed] * self.num_channels)
res['x_compressed'] = x_compressed_stack.copy()
else:
res['x_compressed'] = x_compressed.copy()
x_conv2 = signal.convolve2d(
x_compressed, np.matrix([[1], [-2], [1]]), 'valid')
if self.num_channels > 1:
x_conv2_stack = np.stack([x_conv2] * self.num_channels)
res['x_conv2'] = x_conv2_stack.copy()
else:
res['x_conv2'] = x_conv2.copy()
num_pulses = x_conv2.shape[0]
window = np.transpose(np.repeat(np.expand_dims(
signal.hamming(num_pulses), 0), num_compressed_range_bins, axis=0))
X_window = np.fft.fft(np.multiply(x_conv2, window), NDfft, 0)
if self.num_channels > 1:
X_window_stack = np.stack([X_window] * self.num_channels).copy()
res['X_window'] = X_window_stack
else:
res['X_window'] = X_window.copy()
mask = np.transpose(np.asarray([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]]))
norm = signal.convolve2d(np.ones(X_window.shape), mask, 'same')
res['norm_conv2'] = norm.copy()
Xpow = np.abs(X_window)**2
res['Xpow'] = Xpow.copy()
background_averages = np.divide(
signal.convolve2d(Xpow, mask, 'same'), norm)
if self.num_channels > 1:
ba_stacked = np.stack([background_averages] * self.num_channels)
res['background_averages'] = ba_stacked.copy()
else:
res['background_averages'] = background_averages.copy()
Pfa = 1e-5
alpha = np.multiply(norm, np.power(Pfa, np.divide(-1.0, norm)) - 1)
dets = np.zeros(Xpow.shape)
dets[np.where(Xpow > np.multiply(alpha, background_averages))] = 1
res['alpha'] = alpha.copy()
res['dets'] = dets.copy()
if self.num_channels > 1:
dets_stacked = np.stack([dets] * self.num_channels)
res['dets'] = dets_stacked.copy()
else:
res['dets'] = dets.copy()
return res
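# Summary of the reference pipeline above, keyed to the entries stored in res: frequency-domain pulse
# compression ('x_compressed'), a three-pulse canceller via convolve2d with [1, -2, 1] ('x_conv2'),
# a Hamming-windowed 256-point Doppler FFT ('X_window'), and mask-based CFAR detection with
# Pfa = 1e-5 ('background_averages', 'alpha', 'dets').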
class ambgfun:
def __init__(self, dtype: str, size: List[int]):
cp.random.seed(1234)
self.size = size
self.dtype = dtype
os.environ['CUPY_CACHE_DIR'] = "/tmp"
def run(self):
siglen = self.size[0]
x = matx_common.randn_ndarray((siglen,), self.dtype)
y = None
fs = 1e3
cutValue = 1.0
_new_ynorm_kernel = cp.ElementwiseKernel(
"int32 xlen, raw T xnorm, raw T ynorm",
"T out",
"""
int row = i / xlen;
int col = i % xlen;
int x_col = col - ( xlen - 1 ) + row;
if ( ( x_col >= 0 ) && ( x_col < xlen ) ) {
out = ynorm[col] * thrust::conj( xnorm[x_col] );
} else {
out = T(0,0);
}
""",
"_new_ynorm_kernel",
options=("-std=c++11",),
)
cut = 'delay'
if 'float64' in x.dtype.name:
x = cp.asarray(x, dtype=cp.complex128)
elif 'float32' in x.dtype.name:
x = cp.asarray(x, dtype=cp.complex64)
else:
x = cp.asarray(x)
xnorm = x / cp.linalg.norm(x)
if y is None:
y = x
ynorm = xnorm
else:
ynorm = y / cp.linalg.norm(y)
len_seq = len(xnorm) + len(ynorm)
nfreq = 2**math.ceil(math.log2(len_seq - 1))
# Consider for deletion as we add different cut values
"""
if len(xnorm) < len(ynorm):
len_diff = len(ynorm) - len(xnorm)
ynorm = cp.concatenate(ynorm, cp.zeros(len_diff))
elif len(xnorm) > len(ynorm):
len_diff = len(xnorm) - len(ynorm)
xnorm = cp.concatenate(xnorm, cp.zeros(len_diff))
"""
xlen = len(xnorm)
# if cut == '2d':
new_ynorm = cp.empty((len_seq - 1, xlen), dtype=xnorm.dtype)
_new_ynorm_kernel(xlen, xnorm, ynorm, new_ynorm)
amf_2d = nfreq * cp.abs(cp.fft.fftshift(
cp.fft.ifft(new_ynorm, nfreq, axis=1), axes=1))
# elif cut == 'delay':
Fd = cp.arange(-fs / 2, fs / 2, fs / nfreq)
fftx = cp.fft.fft(xnorm, nfreq) * \
cp.exp(1j * 2 * cp.pi * Fd * cutValue)
xshift = cp.fft.ifft(fftx)
ynorm_pad = cp.zeros(nfreq) + cp.zeros(nfreq)*1j
ynorm_pad[:ynorm.shape[0]] = ynorm
amf_delay = nfreq * cp.abs(cp.fft.ifftshift(
cp.fft.ifft(ynorm_pad * cp.conj(xshift), nfreq)))
# elif cut == 'doppler':
t = cp.arange(0, xlen) / fs
ffty = cp.fft.fft(ynorm, len_seq - 1)
fftx = cp.fft.fft(xnorm * cp.exp(1j * 2 * cp.pi * cutValue * t),
len_seq - 1)
amf_doppler = cp.abs(cp.fft.fftshift(
cp.fft.ifft(ffty * cp.conj(fftx))))
return {
'amf_2d': cp.asnumpy(amf_2d),
'amf_delay': cp.asnumpy(amf_delay),
'amf_doppler': cp.asnumpy(amf_doppler),
'x': cp.asnumpy(x),
}
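# run() returns the full delay-Doppler ambiguity surface ('amf_2d') together with the two single
# cuts evaluated at cutValue = 1.0 ('amf_delay', 'amf_doppler') and the input signal 'x', so the
# corresponding MatX test can reproduce the same results.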
| MatX-main | test/test_vectors/generators/01_radar.py |
import numpy as np
def tup_2_string(x):
return '_'.join(reversed(list(map(str, x))))
def to_file(var, name):
if (var.dtype == np.complex128):
var.astype(np.complex64).tofile(
f'{name}_{tup_2_string(var.shape)}_complex64.bin')
elif (var.dtype == np.float64):
var.astype(np.float32).tofile(
f'{name}_{tup_2_string(var.shape)}_float32.bin')
else:
var.tofile(f'{name}_{tup_2_string(var.shape)}_{str(var.dtype)}.bin')
def randn_ndarray(tshape, dtype):
if np.issubdtype(dtype, np.floating):
return np.random.randn(*tshape)
else:
return np.random.randn(*tshape) + 1j*np.random.randn(*tshape)
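# Illustrative usage sketch (hypothetical, not called by the generators): to_file()
# encodes the reversed shape and the (possibly down-converted) dtype in the file
# name, so a (4, 8) float64 array is written as "example_8_4_float32.bin".
def _sketch_to_file_naming():
    a = randn_ndarray((4, 8), np.float64)
    to_file(a, "example")  # writes example_8_4_float32.bin in the current directory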
| MatX-main | test/test_vectors/generators/matx_common.py |
#!/usr/bin/env python3
import math
import numpy as np
import sys
from scipy import io
from scipy import signal
from scipy.constants import c, pi
from scipy.fft import ifft
import matx_common
from typing import Dict, List
class conv_operators:
def __init__(self, dtype: str, size: List[int]):
np.random.seed(1234)
self.a = matx_common.randn_ndarray((size[0],), dtype)
self.b = matx_common.randn_ndarray((size[1],), dtype)
self.res = {
'a_op': self.a,
'b_op': self.b
}
def conv(self):
self.res['conv_full'] = np.convolve(self.a, self.b, 'full')
self.res['conv_same'] = np.convolve(self.a, self.b, 'same')
self.res['conv_valid'] = np.convolve(self.a, self.b, 'valid')
return self.res
def corr(self):
self.res['corr'] = np.correlate(self.a, self.b, 'full')
return self.res
def corr_swap(self):
self.res['corr_swap'] = np.correlate(self.b, self.a, 'full')
return self.res
class conv2d_operators:
def __init__(self, dtype: str, size: List[int]):
np.random.seed(1234)
self.a = matx_common.randn_ndarray((size[0],size[1]), dtype)
self.b = matx_common.randn_ndarray((size[2],size[3]), dtype)
self.res = {
'a_op': self.a,
'b_op': self.b
}
def conv2d(self):
self.res['conv_full'] = signal.convolve2d(self.a, self.b, 'full')
self.res['conv_same'] = signal.convolve2d(self.a, self.b, 'same')
self.res['conv_valid'] = signal.convolve2d(self.a, self.b, 'valid')
return self.res
class matmul_operators:
def __init__(self, dtype: str, size: List[int]):
np.random.seed(1234)
self.size = size
self.dtype = dtype
if len(size) == 3:
self.res = {
'a': matx_common.randn_ndarray((size[-3], size[-2]), dtype),
'b': matx_common.randn_ndarray((size[-2], size[-1]), dtype)
}
else:
self.res = {
'a': matx_common.randn_ndarray((*size[:-3], size[-3], size[-2]), dtype),
'b': matx_common.randn_ndarray((*size[:-3], size[-2], size[-1]), dtype)
}
def run(self) -> Dict[str, np.ndarray]:
self.res['c'] = self.res['a'] @ self.res['b']
# Create the strided batched version
if len(self.res['c'].shape) == 3:
self.res['cs'] = self.res['c'][::2,:,:]
return self.res
def run_a_transpose(self) -> Dict[str, np.ndarray]:
self.res['a'] = matx_common.randn_ndarray((self.size[1], self.size[0]), self.dtype)
self.res['c'] = np.transpose(self.res['a']) @ self.res['b']
return self.res
def run_b_transpose(self) -> Dict[str, np.ndarray]:
self.res['b'] = matx_common.randn_ndarray((self.size[2], self.size[1]), self.dtype)
self.res['c'] = self.res['a'] @ np.transpose(self.res['b'])
return self.res
def run_transpose(self) -> Dict[str, np.ndarray]:
self.res['c'] = np.transpose(self.res['a'] @ self.res['b'])
return self.res
def run_mixed(self) -> Dict[str, np.ndarray]:
float_to_complex_dtype = {np.float32 : np.complex64, np.float64 : np.complex128}
a = self.res['a']
complex_type = float_to_complex_dtype[a.dtype.type]
complex_a = a.astype(complex_type)
self.res['c'] = complex_a @ self.res['b']
return self.res
class cov_operators:
def __init__(self, dtype: str, size: List[int]):
np.random.seed(1234)
self.size = size
self.res = {
'a': matx_common.randn_ndarray((size[0], size[0]), dtype)
}
def cov(self) -> Dict[str, np.ndarray]:
# Python uses rows instead of columns for samples. Transpose here to match MATLAB
c_cov = np.cov(self.res['a'], rowvar=False)
# When computing covariance, Python uses E[XX'] whereas MATLAB and MatX use E[X'X]. Conjugate the
# answer here to make them match
c_cov = np.conj(c_cov)
self.res['c_cov'] = c_cov
return self.res
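# Illustrative sketch (not part of the generator): the conjugation above turns
# NumPy's E[(x - m)(y - m)*] covariance convention into the E[(x - m)*(y - m)]
# convention used by MATLAB/MatX, i.e. conj(np.cov(a, rowvar=False)) equals
# Xc^H @ Xc / (n - 1) for the column-centered data Xc.
def _sketch_cov_convention():
    rng = np.random.default_rng(0)
    a = rng.standard_normal((16, 3)) + 1j * rng.standard_normal((16, 3))
    xc = a - a.mean(axis=0)
    expected = xc.conj().T @ xc / (a.shape[0] - 1)
    assert np.allclose(np.conj(np.cov(a, rowvar=False)), expected)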
class resample_poly_operators:
np_random_state = None
def __init__(self, dtype: str, size: List[int]):
if not resample_poly_operators.np_random_state:
# We want reproducible results, but do not want to create random vectors that
# are too similar between test cases. If we seed every time with the same value
# and then create test cases with e.g. 1000 and 2000 samples, the first 1000
# samples will be identical in both case. Thus, we seed only once and store the
# state from one call to the next thereafter.
np.random.seed(1234)
else:
np.random.set_state(resample_poly_operators.np_random_state)
self.size = size
up = size[2]
down = size[3]
gcd = math.gcd(up, down)
up //= gcd
down //= gcd
self.res = {
'a': matx_common.randn_ndarray((size[0],), dtype),
'filter_random': matx_common.randn_ndarray((size[1],), dtype),
'up': up,
'down': down
}
# Create a filter compatible with scipy's resample_poly
max_rate = max(up, down)
f_c = 1. / max_rate
half_len = 10 * max_rate
if up != 1 or down != 1:
self.res['filter_default'] = signal.firwin(2 * half_len + 1, f_c, window=('kaiser',5.0)).astype(dtype)
resample_poly_operators.np_random_state = np.random.get_state()
def resample(self) -> Dict[str, np.ndarray]:
self.res['b_random'] = signal.resample_poly(self.res['a'], self.res['up'], self.res['down'], window=self.res['filter_random'])
if 'filter_default' in self.res:
self.res['b_default'] = signal.resample_poly(self.res['a'], self.res['up'], self.res['down'])
return self.res
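# Illustrative sketch (not part of the generator): scipy.signal.resample_poly
# reduces up/down by their greatest common divisor internally, which is why the
# generator stores the reduced ratio; e.g. 4/6 and 2/3 yield identical output.
def _sketch_resample_gcd():
    x = np.random.default_rng(0).standard_normal(100)
    assert np.allclose(signal.resample_poly(x, 4, 6), signal.resample_poly(x, 2, 3))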
class channelize_poly_operators:
np_random_state = None
def __init__(self, dtype: str, size: List[int]):
if not channelize_poly_operators.np_random_state:
# We want reproducible results, but do not want to create random vectors that
# are too similar between test cases. If we seed every time with the same value
# and then create test cases with e.g. 1000 and 2000 samples, the first 1000
# samples will be identical in both case. Thus, we seed only once and store the
# state from one call to the next thereafter.
np.random.seed(1234)
else:
np.random.set_state(channelize_poly_operators.np_random_state)
self.size = size
self.dtype = dtype
signal_len = size[0]
filter_len = size[1]
num_channels = size[2]
# Remaining dimensions are batch dimensions
if len(size) > 3:
a_dims = size[3:]
a_dims = np.append(a_dims, signal_len)
else:
a_dims = [signal_len]
self.res = {
'a': matx_common.randn_ndarray(a_dims, dtype=dtype),
'filter_random': matx_common.randn_ndarray((filter_len,), dtype=dtype),
'num_channels': num_channels,
}
channelize_poly_operators.np_random_state = np.random.get_state()
def channelize(self) -> Dict[str, np.ndarray]:
def idivup(a, b) -> int: return (a+b-1)//b
h = self.res['filter_random']
num_channels = self.res['num_channels']
x = self.res['a']
num_taps_per_channel = idivup(h.size, num_channels)
if num_channels * num_taps_per_channel > h.size:
h = np.pad(h, (0,num_channels*num_taps_per_channel-h.size))
h = np.reshape(h, (num_channels, num_taps_per_channel), order='F')
x_len_per_channel = idivup(x.shape[-1], num_channels)
x_pad_len = x_len_per_channel * num_channels
num_batches = x.size // x.shape[-1]
out = np.zeros((num_batches, num_channels, x_len_per_channel), dtype=np.complex128)
xr = np.reshape(x, (num_batches, x.shape[-1]))
for batch_ind in range(num_batches):
xpad = xr[batch_ind, :]
if x_pad_len > x.shape[-1]:
xpad = np.pad(xpad, (0,x_pad_len-x.shape[-1]))
# flipud because samples are inserted into the filter banks in order
# M-1, M-2, ..., 0
xf = np.flipud(np.reshape(xpad, (num_channels,x_len_per_channel), order='F'))
buf = np.zeros((num_channels, num_taps_per_channel), dtype=self.dtype)
# We scale the outputs by num_channels because we use the ifft
# and it scales by 1/N for an N-point FFT. We use ifft instead
# of fft because the complex exponentials in the Harris paper
# (c.f. Equation 17) are exp(j * ...) instead of exp(-j * ...)
# whereas scipy uses the negative version for DFTs.
scale = num_channels
for i in range(x_len_per_channel):
buf[:, 1:] = buf[:, 0:num_taps_per_channel-1]
buf[:, 0] = xf[:, i]
for j in range(num_channels):
out[batch_ind, j, i] = scale * np.dot(np.squeeze(buf[j,:]), np.squeeze(h[j,:]))
out[batch_ind,:,:] = ifft(out[batch_ind,:,:], axis=0)
if num_batches > 1:
s = list(x.shape)
s[-1] = num_channels
s = np.append(s, x_len_per_channel)
perm = np.arange(len(x.shape)+1)
perm[-2] = len(x.shape)
perm[-1] = len(x.shape)-1
out = np.transpose(np.reshape(out, s), axes=perm)
else:
out = np.transpose(np.reshape(out, out.shape[1:]), axes=[1,0])
self.res['b_random'] = out
return self.res
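# Illustrative sketch (not part of the generator): the ifft-for-fft substitution
# described in the comments above relies on the identity
#   ifft(x) == conj(fft(conj(x))) / N,
# i.e. the ifft flips the sign of the complex exponential and adds a 1/N factor.
def _sketch_ifft_identity():
    rng = np.random.default_rng(0)
    x = rng.standard_normal(16) + 1j * rng.standard_normal(16)
    assert np.allclose(np.fft.ifft(x), np.conj(np.fft.fft(np.conj(x))) / len(x))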
class fft_operators:
def __init__(self, dtype: str, size: List[int]):
self.size = size
self.dtype = dtype
np.random.seed(1234)
def fft_1d(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray((self.size[0],), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.fft(seq, self.size[1])
}
def fft_1d_ortho(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray((self.size[0],), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.fft(seq, self.size[1], norm="ortho")
}
def fft_1d_fwd(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray((self.size[0],), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.fft(seq, self.size[1], norm="forward")
}
def fft_1d_batched(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray((self.size[0],self.size[1]), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.fft(seq, self.size[2])
}
def ifft_1d(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray((self.size[0],), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.ifft(seq, self.size[1])
}
def ifft_1d_ortho(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray((self.size[0],), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.ifft(seq, self.size[1], norm="ortho")
}
def ifft_1d_fwd(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray((self.size[0],), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.ifft(seq, self.size[1], norm="forward")
}
def rfft_1d(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray((self.size[0],), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.rfft(seq, self.size[1])
}
def rfft_1d_batched(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray((self.size[0],self.size[1]), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.rfft(seq, self.size[2])
}
def irfft_1d(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray((self.size[0],), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.irfft(seq, self.size[1])
}
def fft_2d(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray(
(self.size[0], self.size[1]), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.fft2(seq, (self.size[1], self.size[1]))
}
def ifft_2d(self) -> Dict[str, np.ndarray]:
seq = matx_common.randn_ndarray(
(self.size[0], self.size[1]), self.dtype)
return {
'a_in': seq,
'a_out': np.fft.ifft2(seq, (self.size[1], self.size[1]))
}
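# Illustrative sketch (not part of the generator): the *_ortho and *_fwd variants
# above only change NumPy's normalization convention; norm="forward" scales the
# forward FFT by 1/N and norm="ortho" by 1/sqrt(N) (norm="forward" requires
# NumPy >= 1.20).
def _sketch_fft_norms():
    x = np.random.default_rng(0).standard_normal(32)
    X = np.fft.fft(x)
    assert np.allclose(np.fft.fft(x, norm="forward"), X / 32)
    assert np.allclose(np.fft.fft(x, norm="ortho"), X / np.sqrt(32))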
class pwelch_operators:
def __init__(self, dtype: str, params: List[int]):
self.dtype = dtype
self.signal_size = params[0]
self.nperseg = params[1]
self.noverlap = params[2]
self.nfft = params[3]
self.ftone = params[4]
self.sigma = params[5]
np.random.seed(1234)
def pwelch_complex_exponential(self) -> Dict[str, np.ndarray]:
s = np.exp(2j*np.pi*self.ftone*np.linspace(0,self.signal_size-1,self.signal_size)/self.nfft)
n = np.random.normal(loc=0,scale=self.sigma,size=self.signal_size) + 1j*np.random.normal(loc=0,scale=self.sigma,size=self.signal_size)
x = s + n
f, Pxx = signal.welch(x,
fs=1./self.nfft,
window=np.ones(self.nperseg),
nperseg=self.nperseg,
noverlap=self.noverlap,
nfft=self.nfft,
return_onesided=False,
scaling = 'density',
detrend=False)
return {
'x_in': x,
'Pxx_out': Pxx
        }
| MatX-main | test/test_vectors/generators/00_transforms.py |
#!/usr/bin/env python3
import numpy as np
from typing import Dict, List
import os
class csv:
def __init__(self, dtype: str, sizes: List[int]):
self.dtype = dtype
self.files = ("../test/00_io/small_csv_comma_nh.csv", "../test/00_io/small_csv_complex_comma_nh.csv")
def run(self) -> Dict[str, np.array]:
res = {}
for file in self.files:
res[file] = np.genfromtxt(file, delimiter=',', skip_header=1, dtype=self.dtype)
        return res
| MatX-main | test/test_vectors/generators/00_file_io.py |
#!/usr/bin/env python3
import numpy as np
import sys
from scipy import io
from scipy.constants import c, pi
import matx_common
from typing import Dict, List
class matx_python_tests:
def __init__(self, dtype: str, size: List[int]):
pass
def run(self) -> Dict[str, np.ndarray]:
seye = np.eye(1000, dtype=float)
return {
'eye_1000': seye,
}
| MatX-main | test/test_vectors/generators/00_python_tests.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# -- Project information -----------------------------------------------------
project = "rapids-cmake"
copyright = "2021, NVIDIA"
author = "NVIDIA"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "22.10"
# The full version, including alpha/beta/rc tags.
release = "22.10.00"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.intersphinx",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx_copybutton",
"sphinxcontrib.moderncmakedomain"
]
copybutton_prompt_text = ">>> "
ipython_mplbackend = "str"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst']
source_suffix = {".rst": "restructuredtext"}
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_pydata_theme"
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
# only import and set the theme if we're building docs locally
# otherwise, readthedocs.org uses their theme by default,
# so no need to specify it
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "rapidscmakedoc"
# Intersphinx mappings for referencing external documentation
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"cmake": ("https://cmake.org/cmake/help/latest/", None),
}
# Config numpydoc
numpydoc_show_inherited_class_members = True
numpydoc_class_members_toctree = False
autoclass_content = "init"
def setup(app):
app.add_js_file("copybutton_pydocs.js")
app.add_css_file("params.css")
app.add_css_file("https://docs.rapids.ai/assets/css/custom.css")
| MatX-main | cmake/rapids-cmake/docs/conf.py |
#!/usr/bin/python3
import os
from pathlib import Path
from subprocess import PIPE, run
examples = [
x for x in Path(__file__).parent.iterdir() if x.is_dir() and (x / 'CMakeLists.txt').exists()
]
assert(len(examples) > 0)
def runCommand(command):
print('- %s' % command)
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
if result.returncode != 0:
print("error while running '%s':\n" % command, ' ' + str(result.stderr).replace('\n', '\n '))
exit(result.returncode)
return result.stdout
print('')
for example in examples:
print("running example %s" % example.name)
print("================" + ('=' * len(example.name)))
project = Path(".") / 'build' / example.name
configure = runCommand('cmake -H%s -B%s' % (example, project))
print(' ' + '\n '.join([line for line in configure.split('\n') if 'CPM:' in line]))
build = runCommand('cmake --build %s -- -j%i' % (project, os.cpu_count() / 2))
print(' ' + '\n '.join([line for line in build.split('\n') if 'Built target' in line]))
print('')
| MatX-main | public/cpm-cmake/examples/build_all.py |
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
###############################################################################
# Stuart build for host-based unittests.
from edk2nv.stuart import NVIDIACiSettingsManager
class HostBasedTestSettingsManager(NVIDIACiSettingsManager):
''' CiSettingsManager for host-based tests. '''
def GetName(self):
return "HostBasedTests"
def GetPackagesPath(self):
return super().GetPackagesPath() + ["edk2-nvidia/Platform/NVIDIA/"]
| edk2-nvidia-main | Platform/NVIDIA/HostBasedTests/TestBuild.py |
# Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
###############################################################################
# Stuart build for NVIDIA StandaloneMm UEFI firmware
from pathlib import Path
from edk2nv.stuart import NVIDIASettingsManager, NVIDIAPlatformBuilder
from edk2nv.sptool import sptool
class StandaloneMmSettingsManager(NVIDIASettingsManager):
''' SettingsManager for NVIDIA's StandaloneMm platform. '''
def GetName(self):
return "StandaloneMm"
def GetActiveScopes(self):
return super().GetActiveScopes() + ["standalonemm"]
def GetFirmwareVolume(self):
return "FV/UEFI_MM.Fv"
def GetDscName(self):
return ("edk2-nvidia/Platform/NVIDIA/StandaloneMm/StandaloneMm.dsc")
def GetDtbManifestFile(self):
''' Return the name of the built DTB manifest file. '''
return (
"AARCH64/Silicon/NVIDIA/StandaloneMm/Manifest/Manifest/OUTPUT/"
"StandaloneMm.dtb"
)
class PlatformBuilder(NVIDIAPlatformBuilder):
''' PlatformBuilder for NVIDIA's StandaloneMm. '''
SettingsManager = StandaloneMmSettingsManager
def PlatformPostBuild(self):
''' Additional build steps for StandaloneMm platform. '''
ret = super().PlatformPostBuild()
if ret != 0:
return ret
build_dir = Path(self.env.GetValue("BUILD_OUTPUT_BASE"))
# Generate the StMM pkg file.
target = self.settings.GetTarget()
sptool(
manifest_file=build_dir / self.settings.GetDtbManifestFile(),
img_file=build_dir / self.settings.GetFirmwareVolume(),
out_file=f"images/StandaloneMm_{target}.pkg"
)
return 0
| edk2-nvidia-main | Platform/NVIDIA/StandaloneMm/PlatformBuild.py |
# Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
###############################################################################
# Stuart build for NVIDIA Jetson UEFI firmware
from edk2nv.stuart import NVIDIASettingsManager, NVIDIAPlatformBuilder
class JetsonSettingsManager(NVIDIASettingsManager):
''' SettingsManager for NVIDIA's Jetson platform. '''
def GetName(self):
return "Jetson"
def GetActiveScopes(self):
return super().GetActiveScopes() + ["jetson"]
def GetFirmwareVolume(self):
return "FV/UEFI_NS.Fv"
def GetBootAppName(self):
return "AARCH64/L4TLauncher.efi"
def GetDscName(self):
return "edk2-nvidia/Platform/NVIDIA/Jetson/Jetson.dsc"
def GetDtbPath(self):
return "AARCH64/Silicon/NVIDIA/Tegra/DeviceTree/DeviceTree/OUTPUT"
def GetConfigFiles(self):
return ["edk2-nvidia/Platform/NVIDIA/Jetson/Jetson.defconfig"]
class PlatformBuilder(NVIDIAPlatformBuilder):
''' PlatformBuilder for NVIDIA's Jetson. '''
SettingsManager = JetsonSettingsManager
| edk2-nvidia-main | Platform/NVIDIA/Jetson/PlatformBuild.py |
# Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
###############################################################################
# Stuart build for NVIDIA Server UEFI firmware
from edk2nv.stuart import NVIDIASettingsManager, NVIDIAPlatformBuilder
class ServerSettingsManager(NVIDIASettingsManager):
''' SettingsManager for NVIDIA's Server platform. '''
def GetName(self):
return "Server"
def GetActiveScopes(self):
return super().GetActiveScopes() + ["server"]
def GetPackagesPath(self):
return super().GetPackagesPath() + [
"edk2-nvidia-server-gpu-sdk"
]
def GetFirmwareVolume(self):
return "FV/UEFI_NS.Fv"
def GetDscName(self):
return ("edk2-nvidia/Platform/NVIDIA/Server/Server.dsc")
def GetConfigFiles(self):
return ["edk2-nvidia/Platform/NVIDIA/Server/Server.defconfig"]
class PlatformBuilder(NVIDIAPlatformBuilder):
''' PlatformBuilder for NVIDIA's Server. '''
SettingsManager = ServerSettingsManager
| edk2-nvidia-main | Platform/NVIDIA/Server/PlatformBuild.py |
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
###############################################################################
# Stuart build for NVIDIA TegraVirt UEFI firmware
from edk2nv.stuart import NVIDIASettingsManager, NVIDIAPlatformBuilder
class TegraVirtSettingsManager(NVIDIASettingsManager):
''' SettingsManager for NVIDIA's TegraVirt platform. '''
def GetName(self):
return "TegraVirt"
def GetActiveScopes(self):
return super().GetActiveScopes() + ["tegravirt"]
def GetFirmwareVolume(self):
return "FV/FVMAIN_COMPACT.Fv"
def GetDscName(self):
return "edk2-nvidia/Platform/NVIDIA/TegraVirt/TegraVirt.dsc"
class PlatformBuilder(NVIDIAPlatformBuilder):
''' PlatformBuilder for NVIDIA's TegraVirt. '''
SettingsManager = TegraVirtSettingsManager
| edk2-nvidia-main | Platform/NVIDIA/TegraVirt/PlatformBuild.py |
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
###############################################################################
# Stuart build for NVIDIA StandaloneMm UEFI firmware
from edk2nv.stuart import NVIDIASettingsManager, NVIDIAPlatformBuilder
class StandaloneMmOpteeSettingsManager(NVIDIASettingsManager):
''' SettingsManager for NVIDIA's StandaloneMmOptee platform. '''
def GetName(self):
return "StandaloneMmOptee"
def GetFirmwareVolume(self):
return "FV/UEFI_MM.Fv"
def GetDscName(self):
return ("edk2-nvidia/Platform/NVIDIA/StandaloneMmOptee/"
"StandaloneMmOptee.dsc")
class PlatformBuilder(NVIDIAPlatformBuilder):
''' PlatformBuilder for NVIDIA's StandaloneMmOptee. '''
SettingsManager = StandaloneMmOpteeSettingsManager
| edk2-nvidia-main | Platform/NVIDIA/StandaloneMmOptee/PlatformBuild.py |
#!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
import argparse
import io
import os
import sys
DEFAULT_ALIGNMENT = 0x10000
def check_file_exists(filename):
"""
Checks that the given filename exists.
If the file does not exist, prints an error and exits.
Otherwise returns silently
"""
if filename and not os.path.isfile(filename):
print("Error: could not find given file:\n"
" {}".format(filename))
sys.exit(1)
def parse_command_line_args():
"""
Parses the command line arguments for the program.
There are two required positional arguments, the first being the
input file name and the second being the output file name.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"input_file",
metavar="INPUT_FILE",
help="Input UEFI FV file name."
)
parser.add_argument(
"output_file",
metavar="OUTPUT_FILE",
help="Output UEFI binary file name."
)
parser.add_argument(
"--alignment",
type=int,
default=DEFAULT_ALIGNMENT,
help=("Required alignment of the output file given as a decimal value. "
"Default value is {}.".format(DEFAULT_ALIGNMENT))
)
args = parser.parse_args()
check_file_exists(args.input_file)
return (
args.input_file,
args.output_file,
args.alignment
)
def FormatUefiBinary (input_filename, output_filename, alignment=DEFAULT_ALIGNMENT):
with io.open(input_filename, 'rb') as input_file:
output_bytes = input_file.read()
    unaligned_bytes = os.path.getsize(input_filename) % alignment
    if unaligned_bytes != 0:
        output_bytes += bytearray(b'\xFF' * (alignment - unaligned_bytes))
    output_dir = os.path.dirname(output_filename)
    if output_dir and not os.path.isdir(output_dir):
        os.makedirs(output_dir, exist_ok=True)
with io.open(output_filename, 'wb') as output_file:
output_file.write(output_bytes)
def main():
(input_filename, output_filename, alignment) = parse_command_line_args()
FormatUefiBinary (input_filename, output_filename, alignment)
print("Successfully formatted uefi binary to {}".format(output_filename))
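# Illustrative helper (not used by the tool): the padded output size produced by
# FormatUefiBinary is the input size rounded up to the next multiple of alignment.
def _sketch_padded_size(size, alignment=DEFAULT_ALIGNMENT):
    return ((size + alignment - 1) // alignment) * alignment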
if __name__ == '__main__':
main()
| edk2-nvidia-main | Silicon/NVIDIA/edk2nv/FormatUefiBinary.py |
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
Package containing Python-based tools developed by NVIDIA for EDK2.
'''
| edk2-nvidia-main | Silicon/NVIDIA/edk2nv/__init__.py |
#
# This module builds a secure partition pkg file.
#
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import struct
import os
import logging
PAGE_SIZE = 4096
OFFSET_MAGIC = struct.pack("!I", 0x52415346)
def sptool(manifest_file, img_file, out_file):
logging.info("Generating secure partition pkg: %s", out_file)
logging.info(" from image: %s", img_file)
logging.info(" from manifest: %s", manifest_file)
# Header is defined as 6*U32, which is a Python structure of format 'IIIIII'
header_structure = 'IIIIII'
if not os.path.exists(img_file):
logging.fatal("Cannot find image file: %s" % img_file)
return 1
if not os.path.exists(manifest_file):
logging.fatal("Cannot find DTB file: %s" % manifest_file)
return 1
    try:
        with open(manifest_file, mode='rb') as file:
            manifest_data = file.read()
    except Exception:
        logging.error("Could not read DTB file", exc_info=True)
        return 1
    try:
        with open(img_file, mode='rb') as file:
            img_data = file.read()
    except Exception:
        logging.error("Could not read image file", exc_info=True)
        return 1
# Prepare the header, magic spells "SPKG", version 1.
magic = 0x474B5053
version = 1
# The Manifest DTB goes after the header, offset is size of header (6*U32)
dtb_offset = 6*4
dtb_size = len(manifest_data)
# The firmware images goes after the DTB and is PAGE_SIZE aligned
fw_offset = int((dtb_size+dtb_offset) / PAGE_SIZE)*PAGE_SIZE + PAGE_SIZE
fw_size = len(img_data)
#Empty space between Manifest and image
space = bytearray(fw_offset - dtb_size - dtb_offset)
header = struct.pack(header_structure, magic, version, dtb_offset, dtb_size, fw_offset, fw_size)
# Check if a magic is present in DTB and replace it with the actual fw_offset
if OFFSET_MAGIC in manifest_data:
manifest_data = manifest_data.replace(OFFSET_MAGIC, bytearray(struct.pack("!I", fw_offset)))
logging.info("Patched Manifest with Image offset")
try:
with open(out_file, 'wb') as f:
f.write(header)
f.write(manifest_data)
f.write(space)
f.write(img_data)
except Exception as e:
logging.error("Could not write output file", exc_info=True)
return 1
logging.info("Wrote PKG into: %s Entrypoint-offset: 0x%x" % (out_file, fw_offset))
return 0
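# Illustrative sketch (not part of the tool): the 6*U32 header written above can
# be unpacked with the same struct layout; on little-endian machines the magic
# bytes decode to the ASCII string "SPKG".
def _sketch_read_pkg_header(pkg_path):
    with open(pkg_path, "rb") as f:
        magic, version, dtb_offset, dtb_size, fw_offset, fw_size = \
            struct.unpack('IIIIII', f.read(6 * 4))
    assert magic == 0x474B5053
    return {"version": version,
            "dtb": (dtb_offset, dtb_size),
            "fw": (fw_offset, fw_size)}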
| edk2-nvidia-main | Silicon/NVIDIA/edk2nv/sptool.py |
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
Package containing NVIDIA's extensions to EDK2's stuart build system.
'''
from .builder import * # noqa
from .settings import * # noqa
| edk2-nvidia-main | Silicon/NVIDIA/edk2nv/stuart/__init__.py |
# Copyright (c) Microsoft Corporation.
# Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
import os
import sys
import datetime
import logging
import shutil
from pathlib import Path
from edk2toolext.environment.uefi_build import UefiBuilder
from edk2toolext.environment import shell_environment
from edk2toolext.environment.conf_mgmt import ConfMgmt
__all__ = [
"NVIDIAPlatformBuilder",
]
_base_populate_conf_dir = ConfMgmt.populate_conf_dir
''' Stuart's implementation of ConfMgmt.populate_conf_dir(). We're going to
wrap this.
'''
reason_required = "Required by NVIDIA platforms"
reason_setman = "Set in platform SettingsManager"
reason_dynamic = "Dynamically populated"
class NVIDIAPlatformBuilder(UefiBuilder):
''' Base class for NVIDIA PlatformBuilders. '''
def __init__(self):
super().__init__()
# Create an instance of our SettingsManager to use.
# - stuart's invokeables framework finds the SettingsManager and uses
# it, but the edk2_platform_build invokeable doesn't pass the
# SettingsManager into UefiBuilder. So, we have to create our own
# instance.
# - We definitely want the settings manager. In order to support
# stuart, the settings manager already has nearly everything we need.
# We just need to add a few things to support our build extensions.
self.settings = self.SettingsManager()
def MoveConfDir(self):
''' Use a platform-specific Conf directory.
Convince stuart to use `settings.GetConfDirName()` as the Conf
directory. If the Conf directory does not exist, it will be
populated with the template.
Stuart is hard-coded to always use <workspace_root>/Conf as its
Conf directory. However, we want to have platform-specific Conf
directories. In the near term, that avoids race conditions when
building multiple platforms in parallel. In the long term, should
we choose to customize the contents of the Conf directory for a
given platform, we'll have a way to do that.
build.py allows the Conf directory to be moved via command line or
via env. Since stuart wraps our call to build.py, we'll rely on
the env.
'''
ws_dir = Path(self.settings.GetWorkspaceRoot())
confdir_path = ws_dir / self.settings.GetConfDirName()
confdir_name = str(confdir_path)
# Stuart will populate the conf directory with templates. Since it
# doesn't ask a SettingsManager for the name of the Conf directory, we
# need to monkey-patch in an override.
def hooked_populate_conf_dir(self, conf_folder_path, *args, **kwargs):
_base_populate_conf_dir(self, confdir_name, *args, **kwargs)
ConfMgmt.populate_conf_dir = hooked_populate_conf_dir
# Add this Conf directory to UefiBuilder's search path. When it's
# looking for "Conf/target.txt", for example, it will look in each of
# these directories.
self.mws.PACKAGES_PATH.append(confdir_name)
# Stuart doesn't look for "<search_path>/target.txt", but looks for
# "<search_path>/Conf/target.txt" instead. Rather than add another
# "Conf" to our path, we'll no-op it with a symlink.
confconf_path = confdir_path / "Conf"
if not confconf_path.exists():
confdir_path.mkdir(parents=True, exist_ok=True)
confconf_path.symlink_to(".")
def ParseTargetFile(self):
''' Let the user know about expected "error" messages. '''
# See comments in SetPlatformEnv() for an explanation.
logging.debug("The following 'Can't set value' messages are expected")
return super().ParseTargetFile()
#######################################
# UefiBuilder hooks
def AddPlatformCommandLineOptions(self, parserObj):
''' Add build-specific command-line options.
The stuart_build command lacks a --target option, but the other
commands, e.g. stuart_setup, stuart_update, inherit one from
Edk2MultiPkgAwareInvocable. This is the opposite of what we want
for NVIDIA builds. Other vendors use the multi-pkg aware features
to control which scopes are used during setup and update. That
seems appropriate for architecture, but not for target. We see
target as a build-time option, not a setup-time option.
To work-around stuart, we'll add the --target option here, where
only stuart_build will hit it, but retrieve and handle it in
settings.
'''
super().AddPlatformCommandLineOptions(parserObj)
# Add --target for builds
parserObj.add_argument('--target',
dest="nvidia_target", default="DEBUG",
help="Platform target to build")
# build.py uses "-n" and make uses "-j" or "--jobs". Split the
# difference and accept a little of both.
parserObj.add_argument('-n', '--jobs',
dest="JOBS", type=int,
help="Number of concurrent build jobs to run")
def RetrievePlatformCommandLineOptions(self, args):
''' Retrieve command line options from the argparser namespace '''
self._jobs = args.JOBS
def GetMaxJobs(self):
''' Return the value of the --jobs option.
Defaults to `None`, telling stuart to use its default, which is
num_cpus.
'''
return self._jobs
def BuildConfigFile(self):
''' Builds the kconfig .config file for platform if needed.
'''
from kconfiglib import Kconfig
ws_dir = Path(self.settings.GetWorkspaceRoot())
config_out = ws_dir / "nvidia-config" / self.settings.GetName() / ".config"
        if config_out.is_file():
            return 0
        kconf_file = self.settings.GetKConfigFile()
        if kconf_file is None:
            return 0
kconf_path = ws_dir / kconf_file
kconf = Kconfig(kconf_path, warn_to_stderr=False,
suppress_traceback=True)
kconf.warn_assign_undef = True
kconf.warn_assign_override = False
kconf.warn_assign_redun = False
configs = self.settings.GetConfigFiles()
        print(kconf.load_config(ws_dir / configs[0]))
for config in configs[1:]:
# replace=False creates a merged configuration
print(kconf.load_config(ws_dir / config, replace=False))
kconf.write_config(os.devnull)
if kconf.warnings:
# Put a blank line between warnings to make them easier to read
for warning in kconf.warnings:
print("\n" + warning, file=sys.stderr)
            # Treat all Kconfig warnings as errors so that, e.g., assignments to
            # undefined symbols or symbols that ended up with a different value
            # than the one assigned abort the configuration step.
raise ValueError("Aborting due to Kconfig warnings")
# Write the merged configuration
print(kconf.write_config(config_out))
return 0
def SetPlatformEnv(self):
''' Setup the environment for this platform.
Called by UefiBuilder.SetEnv() prior to the build and after some
basic defaults have been added. Values from target.txt and the
DEFINE section in the platform's DSC/FDF file are added after this.
Some values are used directly by stuart while most are passed
through the shell environment or the build environment.
Shell environment values can be set as follows:
shell_env = shell_environment.GetEnvironment()
shell_env.set_shell_var("KEY", "value")
Build environment values can be set two ways. If set as follows,
they cannot be overridden:
self.env.SetValue("KEY", "value", "reason")
If the build environment value is set as follows, it can be
overridden by a subclass, target.txt, etc.
shell_env = shell_environment.GetEnvironment()
shell_env.set_build_var("KEY", "value")
Build environment variables eventually find their way into make.
'''
# Move the Conf directory. This isn't an "env" thing, but this is the
# first callback, __init__() is too early, and the next callback is too
# late. Given the options available, this is the right place.
self.MoveConfDir()
logging.debug("Setting env from SettingsManager")
# Preempt the contents of target.txt.
#
# If we don't provide a target.txt for a platform, which is normally
# the case, stuart will copy in a template filled with defaults. Then
# stuart will load those defaults and start using them, which is most
# likely not what we want.
#
# Ideally, we'd load target.txt first, then override where the subclass
# had an opinion. Unfortunately, "after target.txt" is too late;
# stuart immediately uses some values without giving us a chance to
# override them first. Instead, we'll have to set values here and not
# allow target.txt to override them. This works fine, but results in
# annoying "Can't set value" messages. We'll just have to ignore
# those.
ws_dir = Path(self.settings.GetWorkspaceRoot())
# ACTIVE_PLATFORM
# - If not provided by the SettingsManager, the value in target.txt
# will be taken.
dsc_name = self.settings.GetDscName()
if dsc_name:
self.env.SetValue("ACTIVE_PLATFORM", dsc_name, reason_setman)
# TARGET - always take the --target argument via GetTarget(). We
# can't defer to target.txt here because we use GetTarget() to name
# the directory target.txt lives in (via GetConfDirName()).
self.env.SetValue("TARGET", self.settings.GetTarget(), reason_required)
# MAX_CONCURRENT_THREAD_NUMBER - always take the --jobs argument, if
# one was provided.
max_jobs = self.GetMaxJobs()
if max_jobs:
self.env.SetValue("MAX_CONCURRENT_THREAD_NUMBER", max_jobs,
reason_required)
# TARGET_ARCH - always AARCH64 on NVIDIA platforms.
self.env.SetValue("TARGET_ARCH", "AARCH64", reason_required)
# TOOL_CHAIN_TAG
# - If not provided by the SettingsManager, the value in target.txt
# will be taken.
toolchain_tag = self.settings.GetToolchainTag()
if toolchain_tag:
self.env.SetValue("TOOL_CHAIN_TAG", toolchain_tag, reason_setman)
# Set additional build variables
cur_time = datetime.datetime.now()
build_ts = cur_time.astimezone().replace(microsecond=0).isoformat()
self.env.SetValue("BLD_*_BUILD_DATE_TIME", build_ts, reason_dynamic)
self.env.SetValue("BLD_*_BUILD_PROJECT_TYPE", "EDK2", reason_required)
self.env.SetValue("BLD_*_BUILDID_STRING",
self.settings.GetFirmwareVersion(), reason_dynamic)
# Setup build reporting
self.env.SetValue("BUILDREPORTING", "TRUE", reason_required)
self.env.SetValue("BUILDREPORT_TYPES",
self.settings.GetReportTypes(), reason_required)
self.env.SetValue("BUILDREPORT_FILE",
str(ws_dir / self.settings.GetReportFile()),
reason_setman)
# Set shell env
shell_environment.GetEnvironment().set_shell_var(
f"{toolchain_tag}_AARCH64_PREFIX",
self.settings.GetCrossCompilerPrefix())
shell_environment.GetEnvironment().set_shell_var(
f"DTCPP_PREFIX",
self.settings.GetCrossCompilerPrefix())
# - Needed by build.py.
confdir_path = ws_dir / self.settings.GetConfDirName()
shell_environment.GetEnvironment().set_shell_var(
"CONF_PATH", str(confdir_path))
defconf = self.settings.GetConfigFiles()
if defconf:
self.BuildConfigFile ()
# Must return 0 to indicate success.
return 0
def PlatformPreBuild(self):
return 0
def PlatformPostBuild(self):
''' Additional build steps for NVIDIA platforms. '''
from edk2nv.FormatUefiBinary import FormatUefiBinary
ws_dir = Path(self.settings.GetWorkspaceRoot())
build_dir = Path(self.env.GetValue("BUILD_OUTPUT_BASE"))
target = self.settings.GetTarget()
# Store the path to the build directory in a place an upstream build
# system can find it.
builddirfile = ws_dir / self.settings.GetBuildDirFile()
builddirfile.parent.mkdir(parents=True, exist_ok=True)
builddirfile.write_text(str(build_dir))
# Remove the Conf link we added earlier. It can cause problems for
# tools, such as find, that want to spider the build directory. Since
# we're done building, we don't need it any more.
confdir_path = ws_dir / self.settings.GetConfDirName()
confconf_path = confdir_path / "Conf"
if confconf_path.is_symlink():
confconf_path.unlink()
# Generate the firmware image, if appropriate for this platform
fw_rel = self.settings.GetFirmwareVolume()
if fw_rel:
fw_vol = build_dir / fw_rel
fw_img = ws_dir / self.settings.GetFirmwareImageFile()
logging.info("Generating uefi image %s", fw_img)
fw_img.parent.mkdir(parents=True, exist_ok=True)
FormatUefiBinary(str(fw_vol), str(fw_img))
# Copy the boot app, if appropriate for this platform
boot_rel = self.settings.GetBootAppName()
if boot_rel:
boot_path = build_dir / boot_rel
boot_out = ws_dir / self.settings.GetBootAppFile()
logging.info("Copying boot app %s", boot_out)
            boot_out.parent.mkdir(parents=True, exist_ok=True)
            shutil.copyfile(boot_path, boot_out)
# Copy DTBs, if appropriate for this platform
dtb_path = self.settings.GetDtbPath()
if dtb_path:
full_dtb_path = build_dir / dtb_path
# Copy each generated DTB
for src_dtb in full_dtb_path.glob("*.dtb"):
dest_dtb = self.settings.GetDtbFile(src_dtb.stem)
logging.info("Copying DTB %s", dest_dtb)
shutil.copyfile(src_dtb, dest_dtb)
return 0
def PlatformFlashImage(self):
logging.critical("Flash Image not supported")
return 1
| edk2-nvidia-main | Silicon/NVIDIA/edk2nv/stuart/builder.py |
# Copyright (c) Microsoft Corporation.
# Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
import os
import sys
from pathlib import Path
from edk2toolext.invocables.edk2_update import UpdateSettingsManager
from edk2toolext.invocables.edk2_setup import SetupSettingsManager
from edk2toolext.invocables.edk2_pr_eval import PrEvalSettingsManager
from edk2toolext.invocables.edk2_platform_build import BuildSettingsManager
from edk2toolext.invocables.edk2_ci_setup import CiSetupSettingsManager
from edk2toolext.invocables.edk2_ci_build import CiBuildSettingsManager
from edk2toollib.utility_functions import RunCmd
from edk2toolext.environment import shell_environment
__all__ = [
"NVIDIASettingsManager",
"NVIDIACiSettingsManager",
]
reason_setman = "Set in platform CiSettingsManager"
class AbstractNVIDIASettingsManager(UpdateSettingsManager,
SetupSettingsManager):
''' Abstract stuart SettingsManager. '''
def GetName(self):
''' Get the name of the platform being built. '''
raise NotImplementedError(
"GetName() must be implemented in NVIDIASettingsManager "
"subclasses."
)
#######################################
# Edk2InvocableSettingsInterface
def GetWorkspaceRoot(self):
''' Return the root of the workspace.
This implementation will defer to the WORKSPACE environment
variable.
'''
workspace = os.getenv("WORKSPACE")
if workspace:
# Use pathlib to normalize it, but stuart requires it to be a
# string, not a PathLike.
return str(Path(os.getenv("WORKSPACE")))
else:
raise AttributeError("WORKSPACE not defined")
def GetPackagesPath(self):
''' Return paths that should be mapped as edk2 PACKAGE_PATH.
This is the list of directories, relative to the workspace, where
the build will look for packages.
'''
# NOTE: These paths must use a trailing slash to ensure stuart treats
# them properly when computing relative paths.
packages_paths = [path + "/" for path in self._insert_pkgs_paths]
        packages_paths.extend([
            "edk2/BaseTools/", "edk2/", "edk2-platforms/", "edk2-nvidia/",
            "edk2-nvidia-non-osi/", "edk2-non-osi/",
            "edk2-platforms/Features/Intel/OutOfBandManagement/"
        ])
if self.GetConfigFiles ():
ws_dir = Path(self.GetWorkspaceRoot())
config_path = "nvidia-config/" + self.GetName()
config_fullpath = ws_dir / config_path
config_fullpath.mkdir(parents=True, exist_ok=True)
packages_paths.extend([
config_path
])
return packages_paths
def GetSkippedDirectories(self):
''' Return tuple containing workspace-relative directory paths that should be skipped for processing.
Absolute paths are not supported. '''
# NOTE: These paths must use a trailing slash to ensure stuart treats
# them properly when computing relative paths.
skipped_dirs = [path + "/" for path in self._skipped_dirs]
return skipped_dirs
def GetActiveScopes(self):
''' List of scopes we need for this platform. '''
return ['edk2-build']
def AddCommandLineOptions(self, parserObj):
''' Add command line options to the argparser '''
super().AddCommandLineOptions(parserObj)
parserObj.add_argument(
'--insert-packages-path', dest='nvidia_pkgs_paths', type=str,
help='Insert the given path into the beginning of the list of '
'package paths. Allows build time overrides.',
action="append", default=[])
parserObj.add_argument(
'--insert-skipped-dir', dest='nvidia_skipped_dirs', type=str,
help='Insert the given path into the beginning of the list of '
'skipped paths. Allows build time overrides.',
action="append", default=[])
parserObj.add_argument(
'--require-submodule', dest='nvidia_submodules', type=str,
help='Add a required submodule.',
action="append", default=[])
def RetrieveCommandLineOptions(self, args):
''' Retrieve command line options from the argparser namespace '''
super().RetrieveCommandLineOptions(args)
self._insert_pkgs_paths = args.nvidia_pkgs_paths
self._skipped_dirs = args.nvidia_skipped_dirs
self._added_submodules = args.nvidia_submodules
#######################################
# MultiPkgAwareSettingsInterface
def GetPackagesSupported(self):
''' No-op.
We don't use SetPackages(), so we don't need to implement this
method.
'''
return []
def GetArchitecturesSupported(self):
''' No-op.
We don't use SetArchitectures(), so we don't need to implement this
method.
'''
return []
def GetTargetsSupported(self):
''' No-op.
We don't use SetTargets(), so we don't need to implement this
method.
'''
return []
def GetConfigFiles(self):
''' Return the list of config files that will used for this build
these will be applied in order and are relative to the workspace
'''
return None
class NVIDIASettingsManager(AbstractNVIDIASettingsManager,
PrEvalSettingsManager, BuildSettingsManager,
metaclass=shell_environment.Singleton):
''' Base SettingsManager for various stuart build steps.
Implements the SettingsManager for update, setup, pr-eval, and build
steps for portions common to all NVIDIA platforms. Platforms must
provide a subclass in their PlatformBuid.py.
'''
#######################################
# Edk2InvocableSettingsInterface
def RetrieveCommandLineOptions(self, args):
''' Retrieve command line options from the argparser namespace '''
super().RetrieveCommandLineOptions(args)
if hasattr(args, "nvidia_target"):
# We're in the build step. Pick up the target argument we added in
# builder.py. See the comments in AddPlatformCommandLineOptions()
# to understand this bit of hack.
self._target = args.nvidia_target
else:
# We're not in the build step. Make sure the multi-pkg aware
# options were not used. We don't support them.
if (args.packageList or args.requested_arch or
args.requested_target):
print("The --pkg, --arch, --target are not supported")
sys.exit(1)
#######################################
# NVIDIA settings
# - Additional settings for NVIDIAPlatformBuilder
def GetFirmwareVersionBase(self):
''' Return the base firmware version as a string.
The return from this method will be used as the prefix when setting
BUILDID_STRING, unless the FIRMWARE_VERSION_BASE env is set.
'''
return "202308.0"
def GetFirmwareVersion(self):
''' Return the firmware version as a string.
The return from this method will be used to set BUILDID_STRING.
Subclasses may override it to generate the BUILDID differently.
This implementation will use the format {base}-{suffix}.
- The base can be set via the FIRMWARE_VERSION_BASE env var. If
it is not set, we'll use GetFirmwareVersionBase().
- The suffix can be set via the GIT_SYNC_REVISION env var. If it
is not set, we'll use `git describe`.
'''
base = os.getenv("FIRMWARE_VERSION_BASE")
if not base:
base = self.GetFirmwareVersionBase()
if os.getenv("GIT_SYNC_REVISION") is not None:
return base + "-" + os.getenv("GIT_SYNC_REVISION")
else:
import io
result = io.StringIO()
ret = RunCmd("git", "-C edk2-nvidia describe --always --dirty",
workingdir=self.GetWorkspaceRoot(), outstream=result)
if (ret == 0):
return base + "-" + result.getvalue()
else:
return base + "-Unknown"
def GetFirmwareVolume(self):
''' Return the flash volume to use when generating the firmware image.
Must match a flash volume in the platform's FDF file.
The return must be a string and identify a path relative to the
platform's build output directory.
'''
raise NotImplementedError(
"GetFirmwareVolume() must be implemented in "
"NVIDIASettingsManager subclasses."
)
def GetFirmwareImageFile(self):
''' Return the name of the firmware image.
The firmware image will be generated from the firmware volume and
stored to this filename. This default implementation
will use "images/uefi_{platform_name}_{target}.bin".
Returned as a string identifying a path relative to the workspace
root.
'''
platform_name = self.GetName()
target = self.GetTarget()
return str(Path("images") / f"uefi_{platform_name}_{target}.bin")
def GetDscName(self):
''' Optionally return the path to the platform's DSC file.
If `None`, the value is taken from target.txt. Otherwise, this
will override target.txt
The path must be relative to GetWorkspaceRoot().
This will be used to set ACTIVE_PLATFORM.
'''
return None
def GetToolchainTag(self):
''' Optionally return the toolchain identifier.
Defaults to GCC5. If `None`, the value is taken from target.txt.
Otherwise, this will override target.txt
This will be used to set TOOL_CHAIN_TAG.
'''
tool_chain_tag = os.getenv("TOOL_CHAIN_TAG")
if not tool_chain_tag:
tool_chain_tag = "GCC5"
return tool_chain_tag
def GetReportTypes(self):
''' Return the build report types.
This will be used to set BUILDREPORT_TYPES.
'''
return ("PCD LIBRARY FLASH DEPEX BUILD_FLAGS FIXED_ADDRESS HASH")
def GetReportFile(self):
''' Return the build report filename.
The report will copied to this location after the build. Returned
as a string. This default implementation will use
"reports/{platform_name}_{target}.report"
'''
platform_name = self.GetName()
target = self.GetTarget()
return f"reports/{platform_name}_{target}.report"
def GetCrossCompilerPrefix(self):
''' Return prefix to the toolchain.
This implementation will defer to the CROSS_COMPILER_PREFIX
environment variable.
'''
prefix = os.getenv("CROSS_COMPILER_PREFIX")
if prefix:
# Use pathlib to normalize it, but stuart requires it
# to be a string, not a PathLike.
return str(Path(os.getenv("CROSS_COMPILER_PREFIX")))
else:
raise AttributeError("CROSS_COMPILER_PREFIX not defined")
def GetTarget(self):
''' Return the value of the --target option.
'''
return self._target
def GetConfDirName(self):
''' Return the name of the Conf directory.
This directory name will include the target so that targets
can be built in parallel. Returned as a string. This default
implementation will use "Conf/{platform_name}/{target}".
'''
platform_name = self.GetName()
target = self.GetTarget()
return f"Conf/{platform_name}/{target}"
def GetBootAppName(self):
''' Optionally, the build name of this platform's boot app.
If the platform does not have a boot app, this method should return
`None`.
Returns a path relative to the build directory.
'''
return None
def GetBootAppFile(self):
''' Return the file name of the boot app.
We'll copy the built boot app to this location. This default
implementation will use
"images/BOOTAA64_{platform_name}_{target}.efi".
Returns a path relative to the workspace.
'''
platform_name = self.GetName()
target = self.GetTarget()
return str(Path("images") / f"BOOTAA64_{platform_name}_{target}.efi")
def GetDtbPath(self):
''' Optionally, the build path of this platform's DTB files.
If the platform does not have DTBs, this method should return
`None`.
Returns a path relative to the build directory.
'''
return None
def GetDtbFile(self, dtb_stem):
''' Return the file name of the given DTB file.
We'll copy the built DTB to this location. This default
implementation will use
"images/{dtb_stem}_{platform_name}_{target}.dtbo".
Returns a path relative to the workspace.
'''
platform_name = self.GetName()
target = self.GetTarget()
return str(Path("images") / f"{dtb_stem}_{platform_name}_{target}.dtbo")
def GetBuildDirFile(self):
''' Return the file name of the build dir file.
This file will contain the full path to the build directory. Useful
when an upstream build system needs access to arbitrary build
artifacts. This default implementation will use
"images/builddir_{platform_name}_{target}.txt".
Returns a path relative to the workspace.
'''
platform_name = self.GetName()
target = self.GetTarget()
return str(Path("images") / f"builddir_{platform_name}_{target}.txt")
def GetKConfigFile(self):
''' Return the file name of the main Kconfig configuration.
This file will is used with the platform Kconfig file to generate the
specific configuration.
The path must be relative to GetWorkspaceRoot().
'''
return "edk2-nvidia/Silicon/NVIDIA/Kconfig"
class NVIDIACiSettingsManager(AbstractNVIDIASettingsManager,
CiSetupSettingsManager, CiBuildSettingsManager,
metaclass=shell_environment.Singleton):
''' Base SettingsManager for various stuart CI steps.
Implement some sane defaults for CI steps.
'''
def __init__(self, *args, **kwargs):
''' Initialize the SettingsManager and set up build environment.
This is the best opportunity we have to set the build environment.
Unlike the "build" step, the "ci_build" step doesn't provide a callback
like SetPlatformEnv().
'''
super().__init__(*args, **kwargs)
env = shell_environment.GetBuildVars()
ws_dir = Path(self.GetWorkspaceRoot())
# TOOL_CHAIN_TAG
# - If not provided by the SettingsManager, the value in target.txt
# will be taken.
toolchain_tag = self.GetToolchainTag()
if toolchain_tag:
env.SetValue("TOOL_CHAIN_TAG", toolchain_tag, reason_setman)
# Setup build reporting
env.SetValue("BUILDREPORTING", "TRUE", reason_setman)
env.SetValue("BUILDREPORT_TYPES",
self.GetReportTypes(), reason_setman)
env.SetValue("BUILDREPORT_FILE",
str(ws_dir / self.GetReportFile()),
reason_setman)
def GetArchitecturesSupported(self):
''' return iterable of edk2 architectures supported by this build '''
return ("X64",)
def GetTargetsSupported(self):
''' return iterable of edk2 target tags supported by this build '''
return ("NOOPT",)
def GetActiveScopes(self):
# Add the "host-based-test" scope, which will trigger the plugin that
# runs the unittests after the build.
return super().GetActiveScopes() + ["cibuild", "host-based-test"]
#######################################
# NVIDIA settings
def GetToolchainTag(self):
''' Return the toolchain identifier.
At this time, we only support CI runs with the GCC5 toolchain.
This will be used to set TOOL_CHAIN_TAG.
'''
tool_chain_tag = os.getenv("TOOL_CHAIN_TAG")
if not tool_chain_tag:
tool_chain_tag = "GCC5"
return tool_chain_tag
def GetReportTypes(self):
''' Return the build report types.
This will be used to set BUILDREPORT_TYPES.
'''
return ("PCD LIBRARY FLASH DEPEX BUILD_FLAGS FIXED_ADDRESS HASH")
def GetReportFile(self):
''' Return the build report filename.
The report will copied to this location after the build. Returned
as a string. This default implementation will use
"reports/{platform_name}.report"
'''
platform_name = self.GetName()
return f"reports/{platform_name}.report"
| edk2-nvidia-main | Silicon/NVIDIA/edk2nv/stuart/settings.py |
#!/usr/bin/env python3
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import argparse
def build_profile(builder, network, profile_shapes, default_shape_value=1):
"""
Build optimization profile for the builder and configure the min, opt, max shapes appropriately.
"""
def is_dimension_dynamic(dim):
return dim is None or dim <= 0
def override_shape(shape):
return tuple([1 if is_dimension_dynamic(dim) else dim for dim in shape])
profile = builder.create_optimization_profile()
for idx in range(network.num_inputs):
inp = network.get_input(idx)
def get_profile_shape(name):
if name not in profile_shapes:
return None
shapes = profile_shapes[name]
if not isinstance(shapes, list) or len(shapes) != 3:
                raise ValueError(
                    "Profile values must be a list containing exactly 3 shapes "
                    "(tuples or Dims), but received shapes: {:} for input: {:}.\n"
                    "Note: profile was: {:}".format(shapes, name, profile_shapes))
return shapes
if inp.is_shape_tensor:
shapes = get_profile_shape(inp.name)
if not shapes:
rank = inp.shape[0]
shapes = [(default_shape_value, ) * rank] * 3
print("Setting shape input to {:}. If this is incorrect, for shape input: {:}, please provide tuples for min, opt, and max shapes containing {:} elements".format(shapes[0], inp.name, rank))
min, opt, max = shapes
profile.set_shape_input(inp.name, min, opt, max)
print("Setting shape input: {:} values to min: {:}, opt: {:}, max: {:}".format(inp.name, min, opt, max))
elif -1 in inp.shape:
shapes = get_profile_shape(inp.name)
if not shapes:
shapes = [override_shape(inp.shape)] * 3
print("Overriding dynamic input shape {:} to {:}. If this is incorrect, for input tensor: {:}, please provide tuples for min, opt, and max shapes containing values: {:} with dynamic dimensions replaced,".format(inp.shape, shapes[0], inp.name, inp.shape))
min, opt, max = shapes
profile.set_shape(inp.name, min, opt, max)
print("Setting input: {:} shape to min: {:}, opt: {:}, max: {:}".format(inp.name, min, opt, max))
if not profile:
print("Profile is not valid, please provide profile data. Note: profile was: {:}".format(profile_shapes))
return profile
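# Illustrative sketch (not part of the original script): profile_shapes is
# expected to map each dynamic input name to exactly three shapes, interpreted
# as (min, opt, max). The tensor name and dimensions below are hypothetical
# placeholders.
#
# example_profile_shapes = {
#     "input": [(1, 3, 224, 224), (8, 3, 224, 224), (32, 3, 224, 224)],
# }
# profile = build_profile(builder, network, example_profile_shapes)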
def preprocess_network(network):
"""
Add quantize and dequantize nodes after the input placeholder.
The scale values are currently picked on an empirical basis. Ideally,
these nodes should be added during quantization-aware training, where
the dynamic ranges of the input node are learned.
"""
quant_scale = np.array([1.0/127.0], dtype=np.float32)
dequant_scale = np.array([127.0/1.0], dtype=np.float32)
# Zero point is always zero for quantization in TensorRT.
zeros = np.zeros(shape=(1, ), dtype=np.float32)
for i in range(network.num_inputs):
inp = network.get_input(i)
# Find layer consuming input tensor
found = False
for layer in network:
if found:
break
for k in range(layer.num_inputs):
if (inp == layer.get_input(k)):
mode = trt.ScaleMode.UNIFORM
quantize = network.add_scale(inp, mode, scale=quant_scale, shift=zeros)
quantize.set_output_type(0, trt.int8)
quantize.name = "InputQuantizeNode"
quantize.get_output(0).name = "QuantizedInput"
dequantize = network.add_scale(quantize.get_output(0), mode, scale=dequant_scale, shift=zeros)
dequantize.set_output_type(0, trt.float32)
dequantize.name = "InputDequantizeNode"
dequantize.get_output(0).name = "DequantizedInput"
layer.set_input(k, dequantize.get_output(0))
found = True
break
def build_engine_onnx(model_file, verbose=False):
"""
Parse the ONNX model file with TensorRT and build a TensorRT engine.
"""
# Create builder and network
if verbose:
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
else:
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network_flags = network_flags | (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION))
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(flags=network_flags) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
with open(model_file, 'rb') as model:
if not parser.parse(model.read()):
print('ERROR: Failed to parse the ONNX file.')
for error in range(parser.num_errors):
print(parser.get_error(error))
return None
# Add quantize and dequantize nodes for input of the network
preprocess_network(network)
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30
config.flags = config.flags | 1 << int(trt.BuilderFlag.INT8)
# Setting the (min, opt, max) batch sizes to be 1. Users need to configure this according to their requirements.
config.add_optimization_profile(build_profile(builder, network, profile_shapes={'input' : [(1, 3, 224, 224),(1, 3, 224, 224),(1, 3, 224, 224)]}))
return builder.build_engine(network, config)
def main(args):
model_file = args.onnx
# Parse the ONNX graph through TensorRT and build the engine
trt_engine = build_engine_onnx(model_file, args.verbose)
# Serialize the engine and save to file
with open(args.engine, "wb") as file:
file.write(trt_engine.serialize())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--onnx", type=str, default='rn50.onnx', help="Path to RN50 ONNX graph")
parser.add_argument("--engine", type=str, default='rn50_trt.engine', help="output path to TensorRT engine")
parser.add_argument('-v', '--verbose', action='store_true', help="Flag to enable verbose logging")
args = parser.parse_args()
main(args)
| sampleQAT-master | build_engine.py |
#!/usr/bin/env python3
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import onnx_graphsurgeon as gs
import argparse
import onnx
import numpy as np
def process_transpose_nodes(graph):
"""
This is a workaround to manually transpose the conv weights and remove
the existing transpose nodes. Currently TRT has a limitation when there is
a transpose node as an input to the weights of the conv layer. This utility
will be removed in a future release.
"""
# Find all the transposes before the convolutional nodes
conv_nodes = [node for node in graph.nodes if node.op == "Conv"]
for node in conv_nodes:
# Transpose the convolutional weights and reset them to the weights
conv_weights_tensor = node.i(1).i().i().inputs[0]
conv_weights_transposed = np.transpose(conv_weights_tensor.values, [3, 2, 0, 1])
conv_weights_tensor.values = conv_weights_transposed
# Remove the transpose nodes after the dequant node. TensorRT does not support transpose nodes after QDQ nodes.
dequant_node_output = node.i(1).i(0).outputs[0]
node.inputs[1] = dequant_node_output
# Remove unused nodes, and topologically sort the graph.
return graph.cleanup().toposort()
if __name__=='__main__':
parser = argparse.ArgumentParser("Post process ONNX graph by removing transpose nodes")
parser.add_argument("--input", required=True, help="Input onnx graph")
parser.add_argument("--output", default='postprocessed_rn50.onnx', help="Name of post processed onnx graph")
args = parser.parse_args()
# Load the rn50 graph
graph = gs.import_onnx(onnx.load(args.input))
# Remove the transpose nodes and reshape the convolution weights
graph = process_transpose_nodes(graph)
# Export the onnx graph from graphsurgeon
onnx_model = gs.export_onnx(graph)
print("Output ONNX graph generated: ", args.output)
onnx.save_model(onnx_model, args.output) | sampleQAT-master | postprocess_onnx.py |
#!/usr/bin/env python3
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from tensorflow.core.protobuf import config_pb2, rewriter_config_pb2, meta_graph_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import importer, ops
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.training import saver
def constfold(graphdef, output_name):
"""Run TensorFlow Grappler's constant-folding optimizer on the given GraphDef and return the optimized GraphDef."""
graph = ops.Graph()
with graph.as_default():
outputs = output_name.split(',')
output_collection = meta_graph_pb2.CollectionDef()
output_list = output_collection.node_list.value
for output in outputs:
output_list.append(output)
importer.import_graph_def(graphdef, name="")
metagraph = saver.export_meta_graph(graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
metagraph.collection_def["train_op"].CopyFrom(output_collection)
rewriter_config = rewriter_config_pb2.RewriterConfig()
rewriter_config.optimizers.extend(["constfold"])
rewriter_config.meta_optimizer_iterations = (rewriter_config_pb2.RewriterConfig.ONE)
session_config = config_pb2.ConfigProto()
session_config.graph_options.rewrite_options.CopyFrom(rewriter_config)
return tf_optimizer.OptimizeGraph(session_config, metagraph)
if __name__ == '__main__':
parser = argparse.ArgumentParser("Folds constants in the provided frozen model")
parser.add_argument("-i", "--input", help="The input frozen model to be constant folded.")
parser.add_argument("--output_node", default="resnet50/output/softmax_1", help="Output node names separated by commas")
parser.add_argument("-o", "--output", default="folded_rn50.pb", help="Path to constant folded output graph")
args, _ = parser.parse_known_args()
with open(args.input, 'rb') as f:
graphdef = graph_pb2.GraphDef()
graphdef.ParseFromString(f.read())
folded_graph = constfold(graphdef, args.output_node)
print("Writing output to {:}".format(args.output))
with open(args.output, "wb") as f:
f.write(folded_graph.SerializeToString())
| sampleQAT-master | fold_constants.py |
#!/usr/bin/env python3
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import PIL.Image
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import image_processing
TRT_DYNAMIC_DIM = -1
def load_normalized_test_case(test_image, pagelocked_buffer, preprocess_func):
# Expected input dimensions
C, H, W = (3, 224, 224)
# Normalize the image and copy it to pagelocked memory.
data = np.asarray([preprocess_func(PIL.Image.open(test_image).convert('RGB'), C, H, W)]).flatten()
np.copyto(pagelocked_buffer, data)
class HostDeviceMem(object):
r""" Simple helper data class that's a little nicer to use than a 2-tuple.
"""
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
def allocate_buffers(engine: trt.ICudaEngine, batch_size: int):
print('Allocating buffers ...')
inputs = []
outputs = []
dbindings = []
stream = cuda.Stream()
for binding in engine:
size = batch_size * abs(trt.volume(engine.get_binding_shape(binding)))
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
dbindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, dbindings, stream
def infer(engine_path, preprocess_func, batch_size, input_image, labels=[], verbose=False):
if verbose:
logger = trt.Logger(trt.Logger.VERBOSE)
else:
logger = trt.Logger(trt.Logger.INFO)
with open(engine_path, 'rb') as f, trt.Runtime(logger) as runtime:
engine = runtime.deserialize_cuda_engine(f.read())
def override_shape(shape, batch_size):
return tuple([batch_size if dim==TRT_DYNAMIC_DIM else dim for dim in shape])
# Allocate buffers and create a CUDA stream.
inputs, outputs, dbindings, stream = allocate_buffers(engine, batch_size)
# Contexts are used to perform inference.
with engine.create_execution_context() as context:
# Resolve dynamic shapes in the context
for binding in engine:
binding_idx = engine.get_binding_index(binding)
shape = engine.get_binding_shape(binding_idx)
if engine.binding_is_input(binding_idx):
if TRT_DYNAMIC_DIM in shape:
shape = override_shape(shape, batch_size)
context.set_binding_shape(binding_idx, shape)
# Load the test images and preprocess them
load_normalized_test_case(input_image, inputs[0].host, preprocess_func)
# Transfer input data to the GPU.
cuda.memcpy_htod(inputs[0].device, inputs[0].host)
# Run inference.
context.execute(batch_size, dbindings)
# Transfer predictions back to host from GPU
out = outputs[0]
cuda.memcpy_dtoh(out.host, out.device)
softmax_output = np.array(out.host)
top1_idx = np.argmax(softmax_output)
output_class = labels[top1_idx+1]
output_confidence = softmax_output[top1_idx]
print ("Output class of the image: {} Confidence: {}".format(output_class, output_confidence))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run inference on TensorRT engines for Imagenet-based Classification models.')
parser.add_argument('-e', '--engine', type=str, required=True,
help='Path to RN50 TensorRT engine')
parser.add_argument('-i', '--image', required=True, type=str,
help="Path to input image.")
parser.add_argument("-l", "--labels", type=str, default=os.path.join("labels", "class_labels.txt"),
help="Path to file which has imagenet 1k labels.")
parser.add_argument('-b', '--batch_size', default=1, type=int,
help="Batch size of inputs")
parser.add_argument('-v', '--verbose', action='store_true',
help="Flag to enable verbose loggin")
args = parser.parse_args()
# Class 0 is not used and is treated as background class. Renaming it to "background"
with open(args.labels, "r") as f:
background_class = ["background"]
imagenet_synsets = f.read().splitlines()
imagenet_classes=[]
for synset in imagenet_synsets:
class_name = synset.strip()
imagenet_classes.append(class_name)
all_classes = background_class + imagenet_classes
labels = np.array(all_classes)
# Preprocessing for input images
preprocess_func = image_processing.preprocess_resnet50
# Run inference on the test image
infer(args.engine, preprocess_func, args.batch_size, args.image, labels, args.verbose)
| sampleQAT-master | infer.py |
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
from PIL import Image
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
_RESIZE_MIN = 256
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
def preprocess_imagenet(image, channels=3, height=224, width=224):
"""Pre-processing for Imagenet-based Image Classification Models:
resnet50, vgg16, mobilenet, etc. (Doesn't seem to work for Inception)
Parameters
----------
image: PIL.Image
The image resulting from PIL.Image.open(filename) to preprocess
channels: int
The number of channels the image has (Usually 1 or 3)
height: int
The desired height of the image (usually 224 for Imagenet data)
width: int
The desired width of the image (usually 224 for Imagenet data)
Returns
-------
img_data: numpy array
The preprocessed image data in the form of a numpy array
"""
# Get the image in CHW format
resized_image = image.resize((width, height), Image.ANTIALIAS)
img_data = np.asarray(resized_image).astype(np.float32)
if len(img_data.shape) == 2:
# For images without a channel dimension, we stack
img_data = np.stack([img_data] * 3)
logger.debug("Received grayscale image. Reshaped to {:}".format(img_data.shape))
else:
img_data = img_data.transpose([2, 0, 1])
mean_vec = np.array([0.485, 0.456, 0.406])
stddev_vec = np.array([0.229, 0.224, 0.225])
assert img_data.shape[0] == channels
for i in range(img_data.shape[0]):
# Scale each pixel to [0, 1] and normalize per channel.
img_data[i, :, :] = (img_data[i, :, :] / 255 - mean_vec[i]) / stddev_vec[i]
return img_data
def _smallest_size_at_least(height, width, resize_min):
smaller_dim = np.minimum(float(height), float(width))
scale_ratio = resize_min / smaller_dim
# Convert back to ints to make heights and widths that TF ops will accept.
new_height = int(height * scale_ratio)
new_width = int(width * scale_ratio)
return new_height, new_width
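# Worked example of the scaling above (illustrative numbers): for a 480x640
# image with resize_min = 256, the smaller dimension is 480, so
# scale_ratio = 256 / 480 ~= 0.533 and the resized size becomes roughly
# 256x341 before the central crop is applied.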
def _central_crop(image, crop_height, crop_width):
shape = image.shape
height, width = shape[0], shape[1]
amount_to_be_cropped_h = (height - crop_height)
crop_top = amount_to_be_cropped_h // 2
amount_to_be_cropped_w = (width - crop_width)
crop_left = amount_to_be_cropped_w // 2
cropped_image = image[crop_top:crop_height+crop_top, crop_left:crop_width+crop_left]
return cropped_image
def normalize_inputs(inputs):
num_channels = inputs.shape[-1]
if len(_CHANNEL_MEANS) != num_channels:
raise ValueError('len(means) must match the number of channels')
# We have a 1-D tensor of means; convert to 3-D.
means_per_channel = np.reshape(_CHANNEL_MEANS, [1, 1, num_channels])
# means_per_channel = tf.cast(means_per_channel, dtype=inputs.dtype)
inputs = np.subtract(inputs, means_per_channel)/255.0
return inputs
def preprocess_resnet50(image, channels=3, height=224, width=224):
"""Pre-processing for Imagenet-based Image Classification Models:
resnet50 (resnet_v1_1.5, designed by NVIDIA)
Parameters
----------
image: PIL.Image
The image resulting from PIL.Image.open(filename) to preprocess
channels: int
The number of channels the image has (Usually 1 or 3)
height: int
The desired height of the image (usually 224 for Imagenet data)
width: int
The desired width of the image (usually 224 for Imagenet data)
Returns
-------
img_data: numpy array
The preprocessed image data in the form of a numpy array
"""
# Get the shape of the image.
w, h = image.size
new_height, new_width = _smallest_size_at_least(h, w, _RESIZE_MIN)
# Image is still in WH format in PIL
resized_image = image.resize((new_width, new_height), Image.BILINEAR)
# Changes to HWC due to numpy
img_data = np.asarray(resized_image).astype(np.float32)
# Do a central crop
cropped_image = _central_crop(img_data, height, width)
assert cropped_image.shape[0] == height
assert cropped_image.shape[1] == width
if len(cropped_image.shape) == 2:
# For images without a channel dimension, we stack
cropped_image = np.stack([cropped_image] * 3)
return cropped_image
# logger.debug("Received grayscale image. Reshaped to {:}".format(cropped_image.shape))
normalized_inputs = normalize_inputs(cropped_image)
cropped_image = np.transpose(normalized_inputs, [2, 0, 1])
return cropped_image
def preprocess_inception(image, channels=3, height=224, width=224):
"""Pre-processing for InceptionV1. Inception expects different pre-processing
than {resnet50, vgg16, mobilenet}. This may not be totally correct,
but it worked for some simple test images.
Parameters
----------
image: PIL.Image
The image resulting from PIL.Image.open(filename) to preprocess
channels: int
The number of channels the image has (Usually 1 or 3)
height: int
The desired height of the image (usually 224 for Imagenet data)
width: int
The desired width of the image (usually 224 for Imagenet data)
Returns
-------
img_data: numpy array
The preprocessed image data in the form of a numpy array
"""
# Get the image in CHW format
resized_image = image.resize((width, height), Image.BILINEAR)
img_data = np.asarray(resized_image).astype(np.float32)
if len(img_data.shape) == 2:
# For images without a channel dimension, we stack
img_data = np.stack([img_data] * 3)
logger.debug("Received grayscale image. Reshaped to {:}".format(img_data.shape))
else:
img_data = img_data.transpose([2, 0, 1])
return img_data
| sampleQAT-master | image_processing.py |
"""
Basic multithreaded HTTP server.
Steps to run:
$ python server.py
Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
import argparse
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
class Handler(BaseHTTPRequestHandler):
def log_request(self, code="-", size="-"):
# Don't log successful requests info. Unsuccessful logged by log_error().
pass
def _set_headers(self):
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
def do_PUT(self):
self._set_headers()
self.wfile.write(b"Hello World!")
def do_GET(self):
if self.path == "/health":
self._set_headers()
self.wfile.write(b"Running")
return
self._set_headers()
self.wfile.write(b"Hello World!")
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def run(addr="localhost", port=8000):
server = ThreadedHTTPServer((addr, port), Handler)
print(f"Starting HTTP server on {addr}:{port}")
server.serve_forever()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a simple HTTP server")
parser.add_argument(
"-l",
"--listen",
default="localhost",
help="Specify the IP address on which the server listens",
)
parser.add_argument(
"-p",
"--port",
type=int,
default=8000,
help="Specify the port on which the server listens",
)
args = parser.parse_args()
run(addr=args.listen, port=args.port)
| ais-etl-master | bench/http-server/server.py |
"""
A basic web server using Flask for demonstration purposes.
Steps to run:
$ # with built-in flask server
$ flask --app app run
$ # with gunicorn
$ gunicorn -w 4 'app:app'
Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
import logging
from flask import Flask, request
app = Flask(__name__)
@app.route("/", defaults={"path": ""}, methods=["PUT", "GET"])
@app.route("/<path:path>", methods=["PUT", "GET"])
def image_handler(path):
try:
if request.method == "PUT":
# Read the request body
# Transform the bytes
# Return the transformed bytes
transformed_data = b"Hello World!"
return transformed_data, 200
elif request.method == "GET":
# Get the destination/name of the object from the URL or the path variable
# Fetch the object from the AIS target based on the destination/name
# Use requests.get(ais_target_url + "/" + path) to fetch the object
# Transform the bytes
# Return the transformed bytes
transformed_data = b"Hello World!"
return transformed_data, 200
except Exception as exception:
logging.error("Error processing request: %s", str(exception))
return "Data processing failed", 500
if __name__ == "__main__":
app.run()
| ais-etl-master | bench/flask-server/app.py |
"""
A basic web server using FastAPI for demonstration purposes.
Steps to run:
$ # with uvicorn
$ uvicorn main:app --reload
$ # with multiple uvicorn processes managed by gunicorn
$ gunicorn main:app --workers 4 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8000
Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
from fastapi import FastAPI, Request
app = FastAPI()
@app.put("/")
@app.put("/{full_path:path}")
async def put_handler(request: Request, full_path: str):
"""
Handles PUT requests.
Reads bytes from the request, performs byte transformation,
and returns the modified bytes.
"""
# Read bytes from request (request.body)
# Transform the bytes
# Return the transformed bytes
return b"Hello World from PUT!"
@app.get("/")
@app.get("/{full_path:path}")
async def get_handler(request: Request, full_path: str):
"""
Handles GET requests.
Retrieves the destination/name of the object from the URL or the full_path variable,
fetches the object from the AIS target based on the destination/name,
transforms the bytes, and returns the modified bytes.
"""
# Get destination/name of object from URL or from full_path variable
# Fetch object from AIS target based on the destination/name
# Perform byte transformation
# Return the transformed bytes
return b"Hello World from GET!"
| ais-etl-master | bench/fast-api/main.py |
"""
Test client for all the webservers.
Steps to run:
$ pip install locust
$ locust
Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
from locust import HttpUser, task
class MyTestUser(HttpUser):
@task
def test_put_request(self):
self._perform_put_request()
@task
def test_get_request(self):
self._perform_get_request()
def _perform_put_request(self):
url = "/"
data = "test"
self.client.put(url=url, data=data)
def _perform_get_request(self):
url = "/"
self.client.get(url=url)
| ais-etl-master | bench/client/locustfile.py |
#!/usr/bin/env python
import os
import importlib.util
from typing import Iterator
from inspect import signature
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import requests
host_target = os.environ["AIS_TARGET_URL"]
code_file = os.getenv("MOD_NAME")
arg_type = os.getenv("ARG_TYPE", "bytes")
spec = importlib.util.spec_from_file_location(name="function", location=f"./code/{code_file}.py")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
try:
CHUNK_SIZE = int(os.getenv("CHUNK_SIZE", 0))
except Exception:
CHUNK_SIZE = 0
transform = getattr(mod, os.getenv("FUNC_TRANSFORM"))
def _assert_validations():
transform_params = len(signature(transform).parameters)
if CHUNK_SIZE > 0 and transform_params < 2:
raise ValueError(
"Required to pass context as a parameter to transform if CHUNK_SIZE > 0"
)
class StreamWrapper:
def __init__(self, rfile, content_length, chunk_size):
self._rfile = rfile
self._content_length = content_length
self._chunk_size = chunk_size
self._remaining_length = content_length
def read(self) -> bytes:
return next(self)
def read_all(self) -> bytes:
return self._rfile.read(self._remaining_length)
def __iter__(self) -> Iterator[bytes]:
while self._remaining_length > 0:
read_buffer = (
self._chunk_size
if self._remaining_length >= self._chunk_size
else self._remaining_length
)
self._remaining_length -= read_buffer
yield self._rfile.read(read_buffer)
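# Illustrative sketch of the user-supplied transform wired up above (kept as a
# comment so it does not shadow the real function loaded from MOD_NAME): with
# CHUNK_SIZE == 0 the function receives the whole payload as bytes and returns
# bytes; with CHUNK_SIZE > 0 it receives this StreamWrapper plus the response
# writer and streams its output.
#
# def transform(data: bytes) -> bytes:        # whole-payload variant
#     return data.upper()
#
# def transform(reader, writer):              # streaming variant
#     for chunk in reader:
#         writer.write(chunk.upper())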
class Handler(BaseHTTPRequestHandler):
def log_request(self, *args):
# Don't log successful requests info. Unsuccessful logged by log_error().
pass
def _set_headers(self):
self.send_response(200)
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
def do_PUT(self):
content_length = int(self.headers["Content-Length"])
reader = StreamWrapper(self.rfile, content_length, CHUNK_SIZE)
if CHUNK_SIZE == 0:
result = transform(reader.read_all())
self._set_headers()
self.wfile.write(result)
return
# TODO: validate if transform takes writer as input
# NOTE: for streaming transforms the writer is expected to write bytes into response as stream.
self._set_headers()
transform(reader, self.wfile)
def do_GET(self):
if self.path == "/health":
self._set_headers()
self.wfile.write(b"Running")
return
query_path = host_target + self.path
if arg_type == "url":
result = transform(query_path)
else:
input_bytes = requests.get(query_path).content
result = transform(input_bytes)
self._set_headers()
self.wfile.write(result)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def run(addr="0.0.0.0", port=80):
server = ThreadedHTTPServer((addr, port), Handler)
print(f"Starting HTTP server on {addr}:{port}")
_assert_validations()
server.serve_forever()
if __name__ == "__main__":
run(addr="0.0.0.0", port=80)
| ais-etl-master | runtime/python/server.py |
#!/usr/bin/env python
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-docstring, invalid-name
import argparse
import json
import logging
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import ffmpeg
import filetype
import requests
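# Example (assumed) FFMPEG_OPTIONS value; the keys are passed straight to
# ffmpeg.output(), so any valid ffmpeg output option may appear, and "format"
# also drives the Content-Type header set below:
#   FFMPEG_OPTIONS='{"format": "wav", "ar": 44100}'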
class Handler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.host_target = os.environ["AIS_TARGET_URL"]
self.ffmpeg_options = json.loads(os.environ["FFMPEG_OPTIONS"])
if not self.ffmpeg_options or not isinstance(self.ffmpeg_options, dict):
raise ValueError("FFMPEG_OPTIONS must be a valid JSON dictionary")
self.ffmpeg_format = self.ffmpeg_options.get("format")
super().__init__(*args, **kwargs)
def log_request(self, code="-", size="-"):
pass
def handle_error(self, error_message):
logging.error(error_message)
self.send_response(500)
self.end_headers()
self.wfile.write(b"Data processing failed")
def _set_headers(self, content_type):
self.send_response(200)
self.send_header("Content-Type", f"{content_type}")
self.end_headers()
def process_data(self, data):
input_stream = ffmpeg.input("pipe:0")
output_stream = ffmpeg.output(input_stream, "pipe:1", **self.ffmpeg_options)
try:
output, _ = ffmpeg.run(
output_stream, input=data, capture_stdout=True, capture_stderr=True
)
self.wfile.write(output)
except ffmpeg.Error as error:
self.handle_error(f"FFMPEG Error: {error.stderr.decode()}")
def handle_request(self, data):
if self.ffmpeg_format:
self._set_headers(content_type=f"audio/{self.ffmpeg_format}")
else:
input_type = filetype.guess(data)
self._set_headers(content_type=str(input_type.mime))
self.ffmpeg_options["format"] = input_type.extension
self.process_data(data)
def do_PUT(self):
try:
content_length = int(self.headers["Content-Length"])
post_data = self.rfile.read(content_length)
self.handle_request(post_data)
except Exception as error:
self.handle_error(f"Error processing PUT request: {str(error)}")
def do_GET(self):
try:
if self.path == "/health":
self._set_headers(content_type="text/plain")
self.wfile.write(b"Running")
return
response = requests.get(self.host_target + self.path, timeout=3.05)
self.handle_request(response.content)
except Exception as error:
self.handle_error(f"Error processing GET request: {str(error)}")
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
pass
def run(addr="localhost", port=8000):
server = ThreadedHTTPServer((addr, port), Handler)
print(f"Starting HTTP server on {addr}:{port}")
server.serve_forever()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a simple HTTP server")
parser.add_argument(
"-l",
"--listen",
default="localhost",
required=False,
help="Specify the IP address on which the server listens",
)
parser.add_argument(
"-p",
"--port",
type=int,
default=8000,
required=False,
help="Specify the port on which the server listens",
)
parser_args = parser.parse_args()
run(addr=parser_args.listen, port=parser_args.port)
| ais-etl-master | transformers/ffmpeg/server.py |
#!/usr/bin/env python
import argparse
import hashlib
import requests
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
host_target = os.environ['AIS_TARGET_URL']
class Handler(BaseHTTPRequestHandler):
def log_request(self, code='-', size='-'):
# Don't log successful requests info. Unsuccessful logged by log_error().
pass
def _set_headers(self):
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
def do_PUT(self):
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
md5 = hashlib.md5()
md5.update(post_data)
self._set_headers()
self.wfile.write(md5.hexdigest().encode())
def do_GET(self):
if self.path == "/health":
self._set_headers()
self.wfile.write(b"Running")
return
x = requests.get(host_target + self.path)
md5 = hashlib.md5()
md5.update(x.content)
self._set_headers()
self.wfile.write(md5.hexdigest().encode())
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def run(addr="localhost", port=8000):
server = ThreadedHTTPServer((addr, port), Handler)
print(f"Starting HTTP server on {addr}:{port}")
server.serve_forever()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a simple HTTP server")
parser.add_argument(
"-l",
"--listen",
default="localhost",
help="Specify the IP address on which the server listens",
)
parser.add_argument(
"-p",
"--port",
type=int,
default=8000,
help="Specify the port on which the server listens",
)
args = parser.parse_args()
run(addr=args.listen, port=args.port)
| ais-etl-master | transformers/md5/server.py |
#!/usr/bin/env python
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
import argparse
import bz2
import gzip
import json
import logging
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import requests
host_target = os.environ["AIS_TARGET_URL"]
compress_options = json.loads(os.environ["COMPRESS_OPTIONS"])
if "mode" not in compress_options:
mode = "compress"
else:
mode = compress_options["mode"]
if "compression" not in compress_options:
compression = "gzip"
else:
compression = compress_options["compression"]
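# Example COMPRESS_OPTIONS values accepted by the defaults above:
#   COMPRESS_OPTIONS='{}'                                           -> gzip compress
#   COMPRESS_OPTIONS='{"mode": "decompress", "compression": "bz2"}' -> bz2 decompress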
class Handler(BaseHTTPRequestHandler):
# Overriding log_request to not log successful requests
def log_request(self, code="-", size="-"):
pass
# Set standard headers for responses
def _set_headers(self):
self.send_response(200)
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
def process_data(self, data):
if mode == "compress" and compression == "gzip":
return gzip.compress(data)
if mode == "compress" and compression == "bz2":
return bz2.compress(data)
if mode == "decompress" and compression == "gzip":
return gzip.decompress(data)
if mode == "decompress" and compression == "bz2":
return bz2.decompress(data)
raise ValueError(
f"Unsupported data processing mode ({mode}) or compression algorithm ({compression})"
)
# PUT handler supports `hpush` operation
def do_PUT(self):
try:
content_length = int(self.headers["Content-Length"])
post_data = self.rfile.read(content_length)
processed_data = self.process_data(post_data)
self._set_headers()
self.wfile.write(processed_data)
except Exception as exception:
logging.error("Error processing PUT request: %s", str(exception))
self.send_response(500)
self.end_headers()
self.wfile.write(b"Data processing failed")
# GET handler supports `hpull` operation
def do_GET(self):
try:
if self.path == "/health":
self._set_headers()
self.wfile.write(b"Running")
return
response = requests.get(host_target + self.path)
processed_data = self.process_data(response.content)
self._set_headers()
self.wfile.write(processed_data)
except Exception as exception:
logging.error("Error processing GET request: %s", str(exception))
self.send_response(500)
self.end_headers()
self.wfile.write(b"Data processing failed")
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def run(addr, port):
server = ThreadedHTTPServer((addr, port), Handler)
print(f"Starting HTTP server on {addr}:{port}")
server.serve_forever()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a simple HTTP server")
parser.add_argument(
"-l",
"--listen",
help="Specify the IP address on which the server listens",
)
parser.add_argument(
"-p",
"--port",
type=int,
help="Specify the port on which the server listens",
)
args = parser.parse_args()
run(addr=args.listen, port=args.port)
| ais-etl-master | transformers/compress/server.py |
"""
A simple echo transformation using the FastAPI framework with the Gunicorn and Uvicorn web servers.
Steps to run:
$ # with uvicorn
$ uvicorn main:app --reload
$ # with multiple uvicorn processes managed by gunicorn
$ gunicorn main:app --workers 4 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8000
Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring, broad-exception-caught
import os
import urllib.parse
from fastapi import FastAPI, Request, Depends, Response, HTTPException
import aiohttp # async
app = FastAPI()
host_target = os.environ["AIS_TARGET_URL"]
class HttpClient:
session: aiohttp.ClientSession = None
def start(self):
self.session = aiohttp.ClientSession()
async def stop(self):
await self.session.close()
self.session = None
def __call__(self) -> aiohttp.ClientSession:
assert self.session is not None
return self.session
http_client = HttpClient()
@app.on_event("startup")
async def startup():
http_client.start()
@app.get("/health")
async def health():
return b"Running"
@app.get("/")
@app.get("/{full_path:path}", response_class=Response)
async def get_handler(
full_path: str, client: aiohttp.ClientSession = Depends(http_client)
):
"""
Handles GET requests.
Retrieves the destination/name of the object from the URL or the full_path variable,
fetches the object from the AIS target based on the destination/name,
transforms the bytes, and returns the modified bytes.
"""
# Get destination/name of object from URL or from full_path variable
# Fetch object from AIS target based on the destination/name
# Transform the bytes
# Return the transformed bytes
object_path = urllib.parse.quote(full_path, safe="@")
object_url = f"{host_target}/{object_path}"
resp = await client.get(object_url)
if not resp or resp.status != 200:
raise HTTPException(
status_code=500, detail=f"Error retrieving object ({full_path}) from target"
)
return Response(content=await resp.read(), media_type="application/octet-stream")
@app.put("/")
@app.put("/{full_path:path}", response_class=Response)
async def put_handler(request: Request):
"""
Handles PUT requests.
Reads bytes from the request, performs byte transformation,
and returns the modified bytes.
"""
# Read bytes from request (request.body)
# Transform the bytes
# Return the transformed bytes
return Response(content=await request.body(), media_type="application/octet-stream")
| ais-etl-master | transformers/echo/main.py |
#!/usr/bin/env python
import argparse
import requests
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
host_target = os.environ['AIS_TARGET_URL']
class Handler(BaseHTTPRequestHandler):
def log_request(self, code='-', size='-'):
# Don't log successful requests info. Unsuccessful logged by log_error().
pass
def _set_headers(self):
self.send_response(200)
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
def do_PUT(self):
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
self._set_headers()
self.wfile.write(post_data)
def do_GET(self):
if self.path == "/health":
self._set_headers()
self.wfile.write(b"Running")
return
self._set_headers()
x = requests.get(host_target + self.path)
self.wfile.write(x.content)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def run(addr="localhost", port=8000):
server = ThreadedHTTPServer((addr, port), Handler)
print(f"Starting HTTP server on {addr}:{port}")
server.serve_forever()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a simple HTTP server")
parser.add_argument(
"-l",
"--listen",
default="localhost",
help="Specify the IP address on which the server listens",
)
parser.add_argument(
"-p",
"--port",
type=int,
default=8000,
help="Specify the port on which the server listens",
)
args = parser.parse_args()
run(addr=args.listen, port=args.port)
| ais-etl-master | transformers/echo/http-multithreaded-server/server.py |
"""
A simple hello world transformation using the FastAPI framework with the Gunicorn and Uvicorn web servers.
Steps to run:
$ # with uvicorn
$ uvicorn main:app --reload
$ # with multiple uvicorn processes managed by gunicorn
$ gunicorn main:app --workers 4 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8000
Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring, broad-exception-caught
import os
import urllib.parse
from fastapi import FastAPI, Request, Depends, Response, HTTPException
import aiohttp # async
# from aistore.sdk.errors import AISError
app = FastAPI()
host_target = os.environ.get("AIS_TARGET_URL")
if not host_target:
raise EnvironmentError("AIS_TARGET_URL environment variable missing")
arg_type = os.getenv("ARG_TYPE", "")
class HttpClient:
session: aiohttp.ClientSession = None
def start(self):
self.session = aiohttp.ClientSession()
async def stop(self):
await self.session.close()
self.session = None
def __call__(self) -> aiohttp.ClientSession:
assert self.session is not None
return self.session
http_client = HttpClient()
@app.on_event("startup")
async def startup():
http_client.start()
@app.get("/health")
async def health():
return b"Running"
@app.get("/")
@app.get("/{full_path:path}", response_class=Response)
async def get_handler(
full_path: str, client: aiohttp.ClientSession = Depends(http_client)
):
"""
Handles `hpull://` and `hrev://` requests.
Retrieves the destination/name of the object from the URL or the full_path variable,
fetches the object from the AIS target based on the destination/name,
transforms the bytes, and returns the modified bytes.
"""
# Get destination/name of object from URL or from full_path variable
if arg_type.lower() == "fqn":
with open(full_path, "rb") as file:
file.read()
else:
object_path = urllib.parse.quote(full_path, safe="@")
object_url = f"{host_target}/{object_path}"
# Fetch object from AIS target based on the destination/name
resp = await client.get(object_url)
if not resp or resp.status != 200:
raise HTTPException(
status_code=500,
detail="Error retreiving object ({full_path}) from target",
)
# Transform the bytes
await resp.read()
return Response(content=b"Hello World!", media_type="application/octet-stream")
@app.put("/")
@app.put("/{full_path:path}", response_class=Response)
async def put_handler(request: Request, full_path: str):
"""
Handles `hpush://` requests.
Reads bytes from the request, performs byte transformation,
and returns the modified bytes.
"""
# Read bytes from request (request.body)
if arg_type.lower() == "fqn":
with open(full_path, "rb") as file:
file.read()
else:
await request.body()
# Transform the bytes
# Return the transformed bytes
return Response(content=b"Hello World!", media_type="application/octet-stream")
| ais-etl-master | transformers/hello_world/main.py |
#!/usr/bin/env python
import argparse
import requests
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
host_target = os.environ['AIS_TARGET_URL']
class Handler(BaseHTTPRequestHandler):
def log_request(self, code='-', size='-'):
# Don't log successful requests info. Unsuccessful logged by log_error().
pass
def _set_headers(self):
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
def do_PUT(self):
content_length = int(self.headers['Content-Length'])
self.rfile.read(content_length)
self._set_headers()
self.wfile.write(b"Hello World!")
def do_GET(self):
if self.path == "/health":
self._set_headers()
self.wfile.write(b"Running")
return
requests.get(host_target + self.path)
self._set_headers()
self.wfile.write(b"Hello World!")
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def run(addr="localhost", port=8000):
server = ThreadedHTTPServer((addr, port), Handler)
print(f"Starting HTTP server on {addr}:{port}")
server.serve_forever()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a simple HTTP server")
parser.add_argument(
"-l",
"--listen",
default="localhost",
help="Specify the IP address on which the server listens",
)
parser.add_argument(
"-p",
"--port",
type=int,
default=8000,
help="Specify the port on which the server listens",
)
args = parser.parse_args()
run(addr=args.listen, port=args.port)
| ais-etl-master | transformers/hello_world/http-multithreaded-server/server.py |
"""
Transforming images with the Keras API using the FastAPI framework with the Gunicorn and Uvicorn web servers.
Steps to run:
$ # with uvicorn
$ uvicorn main:app --reload
$ # with multiple uvicorn processes managed by gunicorn
$ gunicorn main:app --workers 4 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8000
Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring, broad-exception-caught
import os
import urllib.parse
import json
import io
import logging
from fastapi import FastAPI, Request, Depends, Response
import aiohttp # async
from keras.preprocessing.image import (
ImageDataGenerator,
load_img,
array_to_img,
img_to_array,
)
app = FastAPI()
# Constants
FORMAT = os.getenv("FORMAT", "JPEG")
ARG_TYPE = os.getenv("ARG_TYPE", "bytes")
# Environment Variables
host_target = os.environ.get("AIS_TARGET_URL")
TRANSFORM = os.environ.get("TRANSFORM")
if not host_target:
raise EnvironmentError("AIS_TARGET_URL environment variable missing")
if not TRANSFORM:
raise EnvironmentError(
"TRANSFORM environment variable missing. Check documentation for examples (link)"
)
transform_dict = json.loads(TRANSFORM)
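# Example TRANSFORM value (taken from the stress tests in this repo); the keys
# are the transform_parameters accepted by Keras' ImageDataGenerator.apply_transform:
#   TRANSFORM='{"theta": 40, "brightness": 0.8, "zx": 0.9, "zy": 0.9}'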
class HttpClient:
session: aiohttp.ClientSession = None
def start(self):
self.session = aiohttp.ClientSession()
async def stop(self):
await self.session.close()
self.session = None
def __call__(self) -> aiohttp.ClientSession:
assert self.session is not None
return self.session
http_client = HttpClient()
@app.on_event("startup")
async def startup():
http_client.start()
@app.get("/health")
async def health():
return b"Ok"
async def transform_image(data: bytes) -> bytes:
"""Process image data as bytes using the specified transformation."""
try:
img = load_img(io.BytesIO(data))
img = img_to_array(img)
datagen = ImageDataGenerator()
img = datagen.apply_transform(x=img, transform_parameters=transform_dict)
img = array_to_img(img)
buf = io.BytesIO()
img.save(buf, format=FORMAT)
return buf.getvalue()
except Exception as e:
logging.error("Error processing data: %s", str(e))
raise
@app.get("/")
@app.get("/{full_path:path}", response_class=Response)
async def get_handler(
full_path: str, client: aiohttp.ClientSession = Depends(http_client)
):
"""
Handles GET requests.
Retrieves the destination/name of the object from the URL or the full_path variable,
fetches the object from the AIS target based on the destination/name,
transforms the bytes, and returns the modified bytes.
"""
# Get destination/name of object from URL or from full_path variable
# Fetch object from AIS target based on the destination/name
# Transform the bytes
# Return the transformed bytes
object_path = urllib.parse.quote(full_path, safe="@")
object_url = f"{host_target}/{object_path}"
resp = await client.get(object_url)
body = await resp.read()
return Response(
content=await transform_image(body), media_type="application/octet-stream"
)
@app.put("/")
@app.put("/{full_path:path}", response_class=Response)
async def put_handler(request: Request):
"""
Handles PUT requests.
Reads bytes from the request, performs byte transformation,
and returns the modified bytes.
"""
# Read bytes from request (request.body)
# Transform the bytes
# Return the transformed bytes
body = await request.body()
return Response(
content=await transform_image(body), media_type="application/octet-stream"
)
| ais-etl-master | transformers/keras_preprocess/main.py |
#!/usr/bin/env python
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring, broad-exception-caught
import os
import json
import logging
import io
import urllib
import requests
from flask import Flask, request
from keras.preprocessing.image import (
ImageDataGenerator,
load_img,
array_to_img,
img_to_array,
)
app = Flask(__name__)
# Constants
FORMAT = os.getenv("FORMAT", "JPEG")
ARG_TYPE = os.getenv("ARG_TYPE", "bytes")
# Environment Variables
host_target = os.environ.get("AIS_TARGET_URL")
logging.info(host_target)
TRANSFORM = os.environ.get("TRANSFORM")
if not host_target:
raise EnvironmentError("AIS_TARGET_URL environment variable missing")
if not TRANSFORM:
raise EnvironmentError(
"TRANSFORM environment variable missing. Check documentation for examples (link)"
)
transform_dict = json.loads(TRANSFORM)
def transform_image(data: bytes) -> bytes:
"""Process image data as bytes using the specified transformation."""
try:
img = load_img(io.BytesIO(data))
img = img_to_array(img)
datagen = ImageDataGenerator()
img = datagen.apply_transform(x=img, transform_parameters=transform_dict)
img = array_to_img(img)
buf = io.BytesIO()
img.save(buf, format=FORMAT)
return buf.getvalue()
except Exception as exp:
logging.error("Error processing data in transform_image: %s", str(exp))
raise exp
@app.route("/health")
def health_check():
return "Running"
@app.route("/", defaults={"path": ""}, methods=["PUT", "GET"])
@app.route("/<path:path>", methods=["PUT", "GET"])
def image_handler(path: str): # pylint: disable=unused-argument
try:
if request.method == "PUT":
post_data = request.data
processed_data = transform_image(post_data)
if processed_data is not None:
return processed_data, 200
return "Data processing failed", 500
if request.method == "GET":
if ARG_TYPE == "url":
# webdataset
query_path = request.args.get("url")
result = transform_image(requests.get(query_path, timeout=5).content)
else:
# normal GET - hpull and hrev
object_path = urllib.parse.quote(path, safe="@")
object_url = f"{host_target}/{object_path}"
resp = requests.get(object_url, timeout=5)
if resp.status_code != 200:
raise FileNotFoundError(f"Error getting '{path}' from '{host_target}'")
result = transform_image(resp.content)
if result is not None:
return result, 200
return "Data processing failed", 500
except Exception as exp:
logging.error("Error processing request: %s", str(exp))
return "Data processing failed", 500
| ais-etl-master | transformers/keras_preprocess/flask-gunicorn/app.py |
#!/usr/bin/env python
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
import os
import json
import logging
import requests
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import io
from keras.preprocessing.image import (
ImageDataGenerator,
load_img,
array_to_img,
img_to_array,
)
# Constants
FORMAT = os.getenv("FORMAT", "JPEG")
ARG_TYPE = os.getenv("ARG_TYPE", "bytes")
# Environment Variables
host_target = os.environ.get("AIS_TARGET_URL")
TRANSFORM = os.environ.get("TRANSFORM")
if not host_target:
raise EnvironmentError("AIS_TARGET_URL environment variable missing")
if not TRANSFORM:
raise EnvironmentError(
"TRANSFORM environment variable missing. Check documentation for examples (link)"
)
transform_dict = json.loads(TRANSFORM)
class Handler(BaseHTTPRequestHandler):
def log_request(self, code="-", size="-"):
"""Override log_request to not log successful requests."""
pass
def _set_headers(self):
"""Set standard headers for responses."""
self.send_response(200)
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
def transform(self, data: bytes) -> bytes:
"""Process image data as bytes using the specified transformation."""
try:
img = load_img(io.BytesIO(data))
img = img_to_array(img)
datagen = ImageDataGenerator()
img = datagen.apply_transform(x=img, transform_parameters=transform_dict)
img = array_to_img(img)
buf = io.BytesIO()
img.save(buf, format=FORMAT)
return buf.getvalue()
except Exception as e:
logging.error("Error processing data: %s", str(e))
raise
def do_PUT(self):
"""PUT handler supports `hpush` operation."""
try:
content_length = int(self.headers["Content-Length"])
post_data = self.rfile.read(content_length)
processed_data = self.transform(post_data)
if processed_data is not None:
self._set_headers()
self.wfile.write(processed_data)
else:
self.send_response(500)
self.end_headers()
self.wfile.write(b"Data processing failed")
except Exception as e:
logging.error("Error processing PUT request: %s", str(e))
self.send_response(500)
self.end_headers()
self.wfile.write(b"Data processing failed")
def do_GET(self):
"""GET handler supports `hpull` operation."""
try:
if self.path == "/health":
self._set_headers()
self.wfile.write(b"Running")
return
query_path = host_target + self.path
if ARG_TYPE == "url": # need this for webdataset
result = self.transform(query_path)
else:
input_bytes = requests.get(query_path).content
result = self.transform(input_bytes)
if result is not None:
self._set_headers()
self.wfile.write(result)
else:
self.send_response(500)
self.end_headers()
self.wfile.write(b"Data processing failed")
except Exception as e:
logging.error("Error processing GET request: %s", str(e))
self.send_response(500)
self.end_headers()
self.wfile.write(b"Data processing failed")
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def run(addr="0.0.0.0", port=80):
server = ThreadedHTTPServer((addr, port), Handler)
logging.info(f"Starting HTTP server on {addr}:{port}")
server.serve_forever()
if __name__ == "__main__":
run(addr="0.0.0.0", port=80)
| ais-etl-master | transformers/keras_preprocess/http-multithreaded-server/server.py |
"""
Stress testing Keras Transformer for 50k images for all communication types
Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import logging
from datetime import datetime
from tests.base import TestBase
from aistore.sdk.etl_const import ETL_COMM_HPULL, ETL_COMM_HPUSH, ETL_COMM_HREV
from aistore.sdk.etl_templates import KERAS_TRANSFORMER
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
class TestKerasStress(TestBase):
def setUp(self):
super().setUp()
# Keep this bucket
self.images_bck = self.client.bucket(bck_name="stress-test-images")
def run_test(self, comm_type: str, func_name: str):
template = KERAS_TRANSFORMER.format(
communication_type=comm_type,
format="JPEG",
transform='{"theta":40, "brightness":0.8, "zx":0.9, "zy":0.9}',
)
self.test_etl.init_spec(template=template, communication_type=comm_type)
start_time = datetime.now()
job_id = self.images_bck.transform(
etl_name=self.test_etl.name,
timeout="30m",
to_bck=self.test_bck,
ext={"JPEG": "JPEG"},
)
self.client.job(job_id).wait(timeout=1800)
time_elapsed = datetime.now() - start_time
job_status = self.client.job(job_id).status()
self.assertEqual(job_status.err, "")
self.assertEqual(
len(self.images_bck.list_all_objects()),
len(self.test_bck.list_all_objects()),
)
logger.info("%s %s", func_name, time_elapsed)
with open("metrics.txt", "a+", encoding="utf-8") as file:
file.write(f"{func_name} {time_elapsed}\n")
def test_keras_hpush_fastapi(self):
self.run_test(ETL_COMM_HPUSH, "test_keras_hpush_fastapi")
def test_keras_hpull_fastapi(self):
self.run_test(ETL_COMM_HPULL, "test_keras_hpull_fastapi")
def test_keras_hrev_fastapi(self):
self.run_test(ETL_COMM_HREV, "test_keras_hrev_fastapi")
| ais-etl-master | transformers/tests/test_keras_stress.py |
"""
Stress testing Hello World Transformer for 1 Million objects for all communication types
Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import logging
from datetime import datetime
from aistore.sdk.etl_const import ETL_COMM_HPULL, ETL_COMM_HPUSH, ETL_COMM_HREV
from aistore.sdk.etl_templates import HELLO_WORLD
from tests.base import TestBase
from tests.utils import git_test_mode_format_image_tag_test
FQN = """
apiVersion: v1
kind: Pod
metadata:
name: transformer-hello-world
annotations:
communication_type: "hpull://"
wait_timeout: 5m
spec:
containers:
- name: server
image: aistorage/transformer_hello_world:test
imagePullPolicy: Always
ports:
- name: default
containerPort: 8000
command: ["gunicorn", "main:app", "--workers", "20", "--worker-class", "uvicorn.workers.UvicornWorker", "--bind", "0.0.0.0:8000"]
# command: ["uvicorn", "main:app", "--reload"]
env:
- name: ARG_TYPE
value: "fqn"
readinessProbe:
httpGet:
path: /health
port: default
volumeMounts:
- name: ais
mountPath: /tmp/
volumes:
- name: ais
hostPath:
path: /tmp/
type: Directory
"""
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
class TestHelloWorldStress(TestBase):
def setUp(self):
super().setUp()
# Don't delete this bucket
self.images_bck = self.client.bucket(bck_name="stress-test-objects")
def test_hello_world_hpush_fastapi(self):
self.run_test(ETL_COMM_HPUSH, "test_hello_world_hpush_fastapi")
def test_hello_world_hpull_fastapi(self):
self.run_test(ETL_COMM_HPULL, "test_hello_world_hpull_fastapi")
def test_hello_world_hrev_fastapi(self):
self.run_test(ETL_COMM_HREV, "test_hello_world_hrev_fastapi")
def test_hello_world_hpull_fastapi_fqn(self):
self.run_test(
ETL_COMM_HPULL, "test_hello_world_hpull_fastapi_fqn", arg_type="fqn"
)
def test_hello_world_hpush_fastapi_fqn(self):
self.run_test(
ETL_COMM_HPUSH, "test_hello_world_hpush_fastapi_fqn", arg_type="fqn"
)
def run_test(self, comm_type: str, func_name: str, arg_type: str = ""):
template = HELLO_WORLD.format(communication_type=comm_type)
if arg_type.lower() == "fqn":
template = FQN
template = git_test_mode_format_image_tag_test(template, "hello_world")
self.test_etl.init_spec(
template=template, communication_type=comm_type, arg_type=arg_type
)
logger.info(self.test_etl.view())
start_time = datetime.now()
job_id = self.images_bck.transform(
etl_name=self.test_etl.name, timeout="5m", to_bck=self.test_bck
)
self.client.job(job_id).wait(timeout=600, verbose=False)
time_elapsed = datetime.now() - start_time
self.assertEqual(self.client.job(job_id).status().err, "")
self.assertEqual(
len(self.images_bck.list_all_objects()),
len(self.test_bck.list_all_objects()),
)
logger.info("%s %s", func_name, time_elapsed)
with open("metrics.txt", "a+", encoding="utf-8") as file:
file.write(f"{func_name} {time_elapsed}\n")
| ais-etl-master | transformers/tests/test_hello_world_stress.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import io
import os
import unittest
from PIL import Image
from torchvision import transforms
from tests.base import TestBase
from tests.utils import git_test_mode_format_image_tag_test
from aistore.sdk.etl_const import ETL_COMM_HPULL, ETL_COMM_HPUSH, ETL_COMM_HREV
from aistore.sdk.etl_templates import TORCHVISION_TRANSFORMER
class TestTransformers(TestBase):
def setUp(self):
super().setUp()
self.test_image_filename = "test-image.jpg"
self.test_image_source = "./resources/test-image.jpg"
self.test_bck.object(self.test_image_filename).put_file(self.test_image_source)
def simple_torchvision_test(self, communication_type):
template = TORCHVISION_TRANSFORMER.format(
communication_type=communication_type,
transform='{"Resize": {"size": [100, 100]}, "Grayscale": {"num_output_channels": 1}}',
format="JPEG",
)
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "torchvision")
# Transform via AIStore
self.test_etl.init_spec(
template=template, communication_type=communication_type, timeout="10m"
)
etl_transformed_image_bytes = (
self.test_bck.object(self.test_image_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
# Transform locally
transform = transforms.Compose(
[
transforms.Resize((100, 100)), # Resize the image to 100x100 pixels
transforms.Grayscale(
num_output_channels=1
), # Convert the image to grayscale
]
)
image = Image.open("./resources/test-image.jpg")
tensor = transforms.ToTensor()(image)
transformed_tensor = transform(tensor)
transformed_image = transforms.ToPILImage()(transformed_tensor)
byte_arr = io.BytesIO()
transformed_image.save(byte_arr, format="JPEG")
transformed_image_bytes = byte_arr.getvalue()
# Compare Results of Separate Transforms
self.assertEqual(transformed_image_bytes, etl_transformed_image_bytes)
@unittest.skipIf(
os.getenv("TORCHVISION_ENABLE", "true") == "false",
"TORCHVISION_PREPROCESS is diabled",
)
def test_torch_transformer_simple_hpull(self):
self.simple_torchvision_test(ETL_COMM_HPULL)
@unittest.skipIf(
os.getenv("TORCHVISION_ENABLE", "true") == "false",
"TORCHVISION_PREPROCESS is diabled",
)
def test_torch_transformer_simple_hpush(self):
self.simple_torchvision_test(ETL_COMM_HPUSH)
@unittest.skipIf(
os.getenv("TORCHVISION_ENABLE", "true") == "false",
"TORCHVISION_PREPROCESS is diabled",
)
def test_torch_transformer_simple_hrev(self):
self.simple_torchvision_test(ETL_COMM_HREV)
| ais-etl-master | transformers/tests/test_torchvision_transformer.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import logging
from aistore.sdk.etl_const import ETL_COMM_HPULL, ETL_COMM_HPUSH, ETL_COMM_HREV
from aistore.sdk.etl_templates import HELLO_WORLD
from tests.base import TestBase
from tests.utils import git_test_mode_format_image_tag_test
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
FQN = """
apiVersion: v1
kind: Pod
metadata:
name: transformer-hello-world
annotations:
communication_type: "hpull://"
wait_timeout: 5m
spec:
containers:
- name: server
image: aistorage/transformer_hello_world:test
imagePullPolicy: Always
ports:
- name: default
containerPort: 8000
command: ["gunicorn", "main:app", "--workers", "20", "--worker-class", "uvicorn.workers.UvicornWorker", "--bind", "0.0.0.0:8000"]
# command: ["uvicorn", "main:app", "--reload"]
env:
- name: ARG_TYPE
value: "fqn"
readinessProbe:
httpGet:
path: /health
port: default
volumeMounts:
- name: ais
mountPath: /tmp/
volumes:
- name: ais
hostPath:
path: /tmp/
type: Directory
"""
class TestHelloWorldTransformer(TestBase):
def setUp(self):
super().setUp()
self.test_image_filename = "test-image.jpg"
self.test_image_source = "./resources/test-image.jpg"
self.test_text_filename = "test-text.txt"
self.test_text_source = "./resources/test-text.txt"
self.test_bck.object(self.test_image_filename).put_file(self.test_image_source)
self.test_bck.object(self.test_text_filename).put_file(self.test_text_source)
def compare_transformed_data_with_hello_world(self, filename: str):
transformed_data_bytes = (
self.test_bck.object(filename).get(etl_name=self.test_etl.name).read_all()
)
self.assertEqual(b"Hello World!", transformed_data_bytes)
def run_hello_world_test(self, communication_type: str, fqn_flag: bool = False):
template = HELLO_WORLD.format(communication_type=communication_type)
arg_type = "fqn" if fqn_flag else ""
if fqn_flag:
template = FQN
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "hello_world")
self.test_etl.init_spec(
template=template, communication_type=communication_type, arg_type=arg_type
)
logger.info(self.test_etl.view())
self.compare_transformed_data_with_hello_world(self.test_image_filename)
self.compare_transformed_data_with_hello_world(self.test_text_filename)
def test_hello_world_hpull(self):
self.run_hello_world_test(ETL_COMM_HPULL)
def test_hello_world_hpush(self):
self.run_hello_world_test(ETL_COMM_HPUSH)
def test_hello_world_hrev(self):
self.run_hello_world_test(ETL_COMM_HREV)
def test_hello_world_hpull_fqn(self):
self.run_hello_world_test(ETL_COMM_HPULL, True)
def test_hello_world_hpush_fqn(self):
self.run_hello_world_test(ETL_COMM_HPUSH, True)
| ais-etl-master | transformers/tests/test_hello_world.py |
| ais-etl-master | transformers/tests/__init__.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import json
import os
import shutil
import tarfile
import unittest
import numpy as np
import tensorflow as tf
from PIL import Image
from skimage.metrics import structural_similarity as ssim
from tests.base import TestBase
from tests.utils import git_test_mode_format_image_tag_test
from aistore.sdk.etl_const import ETL_COMM_HREV
from aistore.sdk.etl_templates import TAR2TF
class TestTar2TFTransformer(TestBase):
def setUp(self):
super().setUp()
self.test_tar_filename = "test-tar-single.tar"
self.test_tar_source = "./resources/test-tar-single.tar"
self.test_tfrecord_filename = "test-tar-single.tfrecord"
self.test_bck.object(self.test_tar_filename).put_file(self.test_tar_source)
def tearDown(self):
file_path = "./test.tfrecord"
os.remove(file_path)
dir_path = "./tmp/"
shutil.rmtree(dir_path)
super().tearDown()
@unittest.skipIf(
os.getenv("TAR2TF_ENABLE", "true") == "false", "TAR2TF is disabled"
)
def test_tar2tf_simple(self):
template = TAR2TF.format(communication_type=ETL_COMM_HREV, arg="", val="")
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "tar2tf")
self.test_etl.init_spec(communication_type=ETL_COMM_HREV, template=template)
tfrecord_bytes = (
self.test_bck.object(self.test_tar_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
tfrecord_filename = "test.tfrecord"
with open(tfrecord_filename, "wb") as f:
f.write(tfrecord_bytes)
tfrecord = next(iter(tf.data.TFRecordDataset([tfrecord_filename])))
example = tf.train.Example()
example.ParseFromString(tfrecord.numpy())
cls = example.features.feature["cls"].bytes_list.value[0]
cls = cls.decode("utf-8")
transformed_img = example.features.feature["png"].bytes_list.value[0]
transformed_img = tf.image.decode_image(transformed_img)
with tarfile.open(self.test_tar_source, "r") as tar:
tar.extractall(path="./tmp")
original_img = Image.open("./tmp/tar-single/0001.png")
original_img_tensor = tf.convert_to_tensor(np.array(original_img))
with open("./tmp/tar-single/0001.cls", "r", encoding="utf-8") as file:
original_cls = file.read().strip()
self.assertTrue(
np.array_equal(transformed_img.numpy(), original_img_tensor.numpy())
)
self.assertEqual(cls, original_cls)
@unittest.skipIf(
os.getenv("TAR2TF_ENABLE", "true") == "false", "TAR2TF is disabled"
)
def test_tar2tf_rotation(self):
spec = {
"conversions": [
{"type": "Decode", "ext_name": "png"},
{"type": "Rotate", "ext_name": "png", "angle": 30},
],
"selections": [{"ext_name": "png"}, {"ext_name": "cls"}],
}
spec = json.dumps(spec)
template = TAR2TF.format(
communication_type=ETL_COMM_HREV, arg="-spec", val=spec
)
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "tar2tf")
self.test_etl.init_spec(template=template, communication_type=ETL_COMM_HREV)
tfrecord_bytes = (
self.test_bck.object(self.test_tar_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
tfrecord_filename = "test.tfrecord"
with open(tfrecord_filename, "wb") as file:
file.write(tfrecord_bytes)
tfrecord = tf.data.TFRecordDataset([tfrecord_filename])
raw_record = next(iter(tfrecord))
example = tf.train.Example()
example.ParseFromString(raw_record.numpy())
cls = example.features.feature["cls"].bytes_list.value[0]
cls = cls.decode("utf-8")
transformed_img = example.features.feature["png"].bytes_list.value[0]
transformed_img = tf.image.decode_image(transformed_img)
with tarfile.open(self.test_tar_source, "r") as tar:
tar.extractall(path="./tmp")
original_img = Image.open("./tmp/tar-single/0001.png").rotate(
angle=30, expand=True, fillcolor=(0, 0, 0)
)
original_img_tensor = tf.convert_to_tensor(np.array(original_img))
with open("./tmp/tar-single/0001.cls", "r", encoding="utf-8") as file:
original_cls = file.read().strip()
# Ensure both images have the same dimensions
transformed_img = tf.image.resize(
transformed_img, original_img_tensor.shape[:2]
)
# Calculate the SSIM
score, _ = ssim(
transformed_img.numpy(),
original_img_tensor.numpy(),
full=True,
multichannel=True,
win_size=3,
data_range=255,
)
# Assuming we consider images with SSIM > 0.99 as visually identical
self.assertTrue(score > 0.99)
self.assertEqual(cls, original_cls)
| ais-etl-master | transformers/tests/test_tar2tf.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
import random
import string
import yaml
def generate_random_str():
return "".join(random.choice(string.ascii_lowercase) for i in range(5))
def git_test_mode_format_image_tag_test(template, img):
template = yaml.safe_load(template)
template["spec"]["containers"][0]["image"] = f"aistorage/transformer_{img}:test"
return yaml.dump(template)
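# Illustrative usage (hypothetical template value; mirrors how the test suite calls this helper):
#   template = git_test_mode_format_image_tag_test(MD5.format(communication_type=ETL_COMM_HPULL), "md5")
# The first container image in the rendered pod spec is rewritten to "aistorage/transformer_md5:test".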
| ais-etl-master | transformers/tests/utils.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import os
import unittest
from tests.base import TestBase
from tests.utils import git_test_mode_format_image_tag_test
from aistore.sdk.etl_const import ETL_COMM_HPULL
from aistore.sdk.etl_templates import GO_ECHO
class TestGoEchoTransformer(TestBase):
def setUp(self):
super().setUp()
self.test_image_filename = "test-image.jpg"
self.test_image_source = "./resources/test-image.jpg"
self.test_text_filename = "test-text.txt"
self.test_text_source = "./resources/test-text.txt"
self.test_bck.object(self.test_image_filename).put_file(self.test_image_source)
self.test_bck.object(self.test_text_filename).put_file(self.test_text_source)
@unittest.skipIf(
os.getenv("GO_ECHO_ENABLE", "true") == "false", "GO_ECHO is disabled"
)
def test_go_echo(self):
template = GO_ECHO.format(communication_type=ETL_COMM_HPULL)
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "echo_go")
self.test_etl.init_spec(template=template, communication_type=ETL_COMM_HPULL)
transformed_image_bytes = (
self.test_bck.object(self.test_image_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
transformed_text_bytes = (
self.test_bck.object(self.test_text_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
# Compare image content
with open(self.test_image_source, "rb") as file:
original_image_content = file.read()
self.assertEqual(transformed_image_bytes, original_image_content)
# Compare text content
with open(self.test_text_source, "r", encoding="utf-8") as file:
original_text_content = file.read()
self.assertEqual(transformed_text_bytes.decode("utf-8"), original_text_content)
| ais-etl-master | transformers/tests/test_go_echo.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import hashlib
import os
import unittest
from aistore.sdk.etl_const import ETL_COMM_HPULL, ETL_COMM_HPUSH, ETL_COMM_HREV
from aistore.sdk.etl_templates import MD5
from tests.utils import git_test_mode_format_image_tag_test
from tests.base import TestBase
class TestMD5Transformer(TestBase):
def setUp(self):
super().setUp()
self.test_image_filename = "test-image.jpg"
self.test_image_source = "./resources/test-image.jpg"
self.test_text_filename = "test-text.txt"
self.test_text_source = "./resources/test-text.txt"
self.test_bck.object(self.test_image_filename).put_file(self.test_image_source)
self.test_bck.object(self.test_text_filename).put_file(self.test_text_source)
def md5_hash_file(self, filepath):
with open(filepath, "rb") as file:
file_content = file.read()
return hashlib.md5(file_content).hexdigest()
def compare_transformed_data_with_md5_hash(self, filename, original_filepath):
transformed_data_bytes = (
self.test_bck.object(filename).get(etl_name=self.test_etl.name).read_all()
)
original_file_hash = self.md5_hash_file(original_filepath)
self.assertEqual(transformed_data_bytes.decode("utf-8"), original_file_hash)
def run_md5_test(self, communication_type):
template = MD5.format(communication_type=communication_type)
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "md5")
self.test_etl.init_spec(
template=template, communication_type=communication_type
)
self.compare_transformed_data_with_md5_hash(
self.test_image_filename, self.test_image_source
)
self.compare_transformed_data_with_md5_hash(
self.test_text_filename, self.test_text_source
)
@unittest.skipIf(os.getenv("MD5_ENABLE", "true") == "false", "MD5 is disabled")
def test_md5_hpull(self):
self.run_md5_test(ETL_COMM_HPULL)
@unittest.skipIf(os.getenv("MD5_ENABLE", "true") == "false", "MD5 is disabled")
def test_md5_hpush(self):
self.run_md5_test(ETL_COMM_HPUSH)
@unittest.skipIf(os.getenv("MD5_ENABLE", "true") == "false", "MD5 is disabled")
def test_md5_hrev(self):
self.run_md5_test(ETL_COMM_HREV)
| ais-etl-master | transformers/tests/test_md5.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import os
import unittest
from aistore.sdk.etl_const import ETL_COMM_HPULL, ETL_COMM_HPUSH, ETL_COMM_HREV
from aistore.sdk.etl_templates import ECHO
from tests.base import TestBase
from tests.utils import git_test_mode_format_image_tag_test
class TestEchoTransformer(TestBase):
def setUp(self):
super().setUp()
self.test_image_filename = "test-image.jpg"
self.test_image_source = "./resources/test-image.jpg"
self.test_text_filename = "test-text.txt"
self.test_text_source = "./resources/test-text.txt"
self.test_bck.object(self.test_image_filename).put_file(self.test_image_source)
self.test_bck.object(self.test_text_filename).put_file(self.test_text_source)
def initialize_template(self, communication_type: str):
template = ECHO.format(communication_type=communication_type)
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "echo")
self.test_etl.init_spec(
template=template, communication_type=communication_type
)
def compare_transformed_data(self, filename: str, source: str):
transformed_bytes = (
self.test_bck.object(filename).get(etl_name=self.test_etl.name).read_all()
)
with open(source, "rb") as file:
original_content = file.read()
self.assertEqual(transformed_bytes, original_content)
@unittest.skipIf(os.getenv("ECHO_ENABLE", "true") == "false", "ECHO is disabled")
def test_echo_hpull(self):
self.initialize_template(ETL_COMM_HPULL)
self.compare_transformed_data(self.test_image_filename, self.test_image_source)
self.compare_transformed_data(self.test_text_filename, self.test_text_source)
@unittest.skipIf(os.getenv("ECHO_ENABLE", "true") == "false", "ECHO is disabled")
def test_echo_hpush(self):
self.initialize_template(ETL_COMM_HPUSH)
self.compare_transformed_data(self.test_image_filename, self.test_image_source)
self.compare_transformed_data(self.test_text_filename, self.test_text_source)
@unittest.skipIf(os.getenv("ECHO_ENABLE", "true") == "false", "ECHO is disabled")
def test_echo_hrev(self):
self.initialize_template(ETL_COMM_HREV)
self.compare_transformed_data(self.test_image_filename, self.test_image_source)
self.compare_transformed_data(self.test_text_filename, self.test_text_source)
| ais-etl-master | transformers/tests/test_echo.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
import json
import os
import unittest
import ffmpeg
from aistore.sdk.etl_const import ETL_COMM_HPULL, ETL_COMM_HPUSH, ETL_COMM_HREV
from aistore.sdk.etl_templates import FFMPEG
from tests.base import TestBase
from tests.utils import git_test_mode_format_image_tag_test
class TestFFMPEGTransformer(TestBase):
def decode_data(self, data, **kwargs):
input_stream = ffmpeg.input("pipe:0")
output_stream = ffmpeg.output(input_stream, "pipe:1", **kwargs)
out, _ = ffmpeg.run(
output_stream, input=data, capture_stdout=True, capture_stderr=True
)
return out
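# Illustrative call (assumed input bytes and options; they mirror the tests below):
#   flac_bytes = self.decode_data(wav_bytes, format="flac", ar=48000, ac=2)
# pipes the raw WAV bytes through ffmpeg and returns the FLAC-encoded output.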
@unittest.skipIf(
os.getenv("FFMPEG_ENABLE", "true") == "false", "FFMPEG is disabled"
)
def test_ffmpeg_from_wav_to_flac_hpull(self):
self.run_ffmpeg_test(
ETL_COMM_HPULL,
"test-audio-wav.wav",
"./resources/test-audio-wav.wav",
{"format": "flac", "ar": 48000, "ac": 2},
)
@unittest.skipIf(
os.getenv("FFMPEG_ENABLE", "true") == "false", "FFMPEG is disabled"
)
def test_ffmpeg_from_mp3_to_wav_hpull(self):
self.run_ffmpeg_test(
ETL_COMM_HPULL,
"test-audio-mp3.mp3",
"./resources/test-audio-mp3.mp3",
{"format": "wav", "ar": 44100, "ac": 2, "af": "loudnorm"},
)
@unittest.skipIf(
os.getenv("FFMPEG_ENABLE", "true") == "false", "FFMPEG is disabled"
)
def test_ffmpeg_format_autodetection_hpull(self):
test_filename = "test-audio-wav.wav"
test_source = "./resources/test-audio-wav.wav"
_, extension = os.path.splitext(test_filename)
file_format = extension[1:]
self.run_ffmpeg_test(
ETL_COMM_HPULL,
test_filename,
test_source,
{"acodec": "pcm_s16le"},
autodetect_format=file_format,
)
@unittest.skipIf(
os.getenv("FFMPEG_ENABLE", "true") == "false", "FFMPEG is disabled"
)
def test_ffmpeg_from_wav_to_flac_hpush(self):
self.run_ffmpeg_test(
ETL_COMM_HPUSH,
"test-audio-wav.wav",
"./resources/test-audio-wav.wav",
{"format": "flac", "ar": 48000, "ac": 2},
)
@unittest.skipIf(
os.getenv("FFMPEG_ENABLE", "true") == "false", "FFMPEG is disabled"
)
def test_ffmpeg_from_mp3_to_wav_hpush(self):
self.run_ffmpeg_test(
ETL_COMM_HPUSH,
"test-audio-mp3.mp3",
"./resources/test-audio-mp3.mp3",
{"format": "wav", "ar": 44100, "ac": 2, "af": "loudnorm"},
)
@unittest.skipIf(
os.getenv("FFMPEG_ENABLE", "true") == "false", "FFMPEG is disabled"
)
def test_ffmpeg_format_autodetection_hpush(self):
test_filename = "test-audio-wav.wav"
test_source = "./resources/test-audio-wav.wav"
_, extension = os.path.splitext(test_filename)
file_format = extension[1:]
self.run_ffmpeg_test(
ETL_COMM_HPUSH,
test_filename,
test_source,
{"acodec": "pcm_s16le"},
autodetect_format=file_format,
)
@unittest.skipIf(
os.getenv("FFMPEG_ENABLE", "true") == "false", "FFMPEG is disabled"
)
def test_ffmpeg_from_wav_to_flac_hrev(self):
self.run_ffmpeg_test(
ETL_COMM_HREV,
"test-audio-wav.wav",
"./resources/test-audio-wav.wav",
{"format": "flac", "ar": 48000, "ac": 2},
)
@unittest.skipIf(
os.getenv("FFMPEG_ENABLE", "true") == "false", "FFMPEG is disabled"
)
def test_ffmpeg_from_mp3_to_wav_hrev(self):
self.run_ffmpeg_test(
ETL_COMM_HREV,
"test-audio-mp3.mp3",
"./resources/test-audio-mp3.mp3",
{"format": "wav", "ar": 44100, "ac": 2, "af": "loudnorm"},
)
@unittest.skipIf(
os.getenv("FFMPEG_ENABLE", "true") == "false", "FFMPEG is disabled"
)
def test_ffmpeg_format_autodetection_hrev(self):
test_filename = "test-audio-wav.wav"
test_source = "./resources/test-audio-wav.wav"
_, extension = os.path.splitext(test_filename)
file_format = extension[1:]
self.run_ffmpeg_test(
ETL_COMM_HREV,
test_filename,
test_source,
{"acodec": "pcm_s16le"},
autodetect_format=file_format,
)
# pylint: disable=too-many-arguments
def run_ffmpeg_test(
self,
communication_type,
test_filename,
test_source,
ffmpeg_options,
autodetect_format=None,
):
self.test_bck.object(test_filename).put_file(test_source)
if autodetect_format is not None:
ffmpeg_options["format"] = autodetect_format
template = FFMPEG.format(
communication_type=communication_type,
ffmpeg_options=json.dumps(ffmpeg_options),
)
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "ffmpeg")
self.test_etl.init_spec(
template=template, communication_type=communication_type
)
etl_transformed_content = (
self.test_bck.object(test_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
with open(test_source, "rb") as file:
original_audio_content = file.read()
local_transformed_content = self.decode_data(
original_audio_content, **ffmpeg_options
)
self.assertEqual(local_transformed_content, etl_transformed_content)
| ais-etl-master | transformers/tests/test_ffmpeg.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import unittest
import io
import os
from tests.base import TestBase
from tests.utils import git_test_mode_format_image_tag_test
from keras.preprocessing.image import (
ImageDataGenerator,
load_img,
array_to_img,
img_to_array,
)
from aistore.sdk.etl_const import ETL_COMM_HPULL, ETL_COMM_HPUSH, ETL_COMM_HREV
from aistore.sdk.etl_templates import KERAS_TRANSFORMER
class TestTransformers(TestBase):
def setUp(self):
super().setUp()
self.test_image_filename = "test-image.jpg"
self.test_image_source = "./resources/test-image.jpg"
self.test_bck.object(self.test_image_filename).put_file(self.test_image_source)
def get_transformed_image_local(self) -> bytes:
# transformed image - local
img = load_img(self.test_image_source)
img = img_to_array(img)
datagen = ImageDataGenerator()
rotate = datagen.apply_transform(
x=img,
transform_parameters={"theta": 40, "brightness": 0.8, "zx": 0.9, "zy": 0.9},
)
img = array_to_img(rotate)
buf = io.BytesIO()
img.save(buf, format="JPEG")
return buf.getvalue()
def get_template(self, comm_type: str) -> str:
template = KERAS_TRANSFORMER.format(
communication_type=comm_type,
format="JPEG",
transform='{"theta":40, "brightness":0.8, "zx":0.9, "zy":0.9}',
)
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "keras")
return template
@unittest.skipIf(
os.getenv("KERAS_ENABLE", "true") == "false",
"Keras image was not built, skipping keras test",
)
def test_keras_transformer_hpull(self):
self.test_etl.init_spec(template=self.get_template(ETL_COMM_HPULL))
transformed_image_etl = (
self.test_bck.object(self.test_image_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
self.assertEqual(self.get_transformed_image_local(), transformed_image_etl)
@unittest.skipIf(
os.getenv("KERAS_ENABLE", "true") == "false",
"Keras image was not built, skipping keras test",
)
def test_keras_transformer_hrev(self):
self.test_etl.init_spec(template=self.get_template(ETL_COMM_HREV))
transformed_image_etl = (
self.test_bck.object(self.test_image_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
self.assertEqual(self.get_transformed_image_local(), transformed_image_etl)
@unittest.skipIf(
os.getenv("KERAS_ENABLE", "true") == "false",
"Keras image was not built, skipping keras test",
)
def test_keras_transformer_hpush(self):
self.test_etl.init_spec(template=self.get_template(ETL_COMM_HPUSH))
transformed_image_etl = (
self.test_bck.object(self.test_image_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
self.assertEqual(self.get_transformed_image_local(), transformed_image_etl)
| ais-etl-master | transformers/tests/test_keras_transformer.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import bz2
import gzip
import json
import os
import unittest
from aistore.sdk.etl_const import ETL_COMM_HPULL, ETL_COMM_HPUSH, ETL_COMM_HREV
from aistore.sdk.etl_templates import COMPRESS
from tests.base import TestBase
from tests.utils import git_test_mode_format_image_tag_test
class TestCompressTransformer(TestBase):
def setUp(self):
super().setUp()
self.test_image_filename = "test-image.jpg"
self.test_image_source = "./resources/test-image.jpg"
self.test_text_filename = "test-text.txt"
self.test_text_source = "./resources/test-text.txt"
self.test_image_gz_filename = "test-image.jpg.gz"
self.test_image_gz_source = "./resources/test-image.jpg.gz"
self.test_text_gz_filename = "test-text.txt.gz"
self.test_text_gz_source = "./resources/test-text.txt.gz"
self.test_image_bz2_filename = "test-image.jpg.bz2"
self.test_image_bz2_source = "./resources/test-image.jpg.bz2"
self.test_text_bz2_filename = "test-text.txt.bz2"
self.test_text_bz2_source = "./resources/test-text.txt.bz2"
def _get_compression_algorithm(self, compress_options):
if compress_options.get("compression") == "bz2":
algorithm = bz2
else:
algorithm = gzip
return algorithm
def _compress_test_helper(self, communication_type, compress_options):
algorithm = self._get_compression_algorithm(compress_options)
self.test_bck.object(self.test_image_filename).put_file(self.test_image_source)
self.test_bck.object(self.test_text_filename).put_file(self.test_text_source)
compress_options = json.dumps(compress_options)
template = COMPRESS.format(
communication_type=communication_type, compress_options=compress_options
)
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "compress")
self.test_etl.init_spec(
template=template, communication_type=communication_type
)
etl_compressed_img = (
self.test_bck.object(self.test_image_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
etl_compressed_txt = (
self.test_bck.object(self.test_text_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
with open(self.test_image_source, "rb") as file:
original_image_content = file.read()
with open(self.test_text_source, "r", encoding="utf-8") as file:
original_text_content = file.read()
self.assertEqual(
algorithm.decompress(etl_compressed_img), original_image_content
)
self.assertEqual(
original_text_content,
algorithm.decompress(etl_compressed_txt).decode("utf-8"),
)
def _decompress_test_helper(self, communication_type, compress_options):
algorithm = self._get_compression_algorithm(compress_options)
if algorithm == bz2:
self.test_bck.object(self.test_image_bz2_filename).put_file(
self.test_image_bz2_source
)
self.test_bck.object(self.test_text_bz2_filename).put_file(
self.test_text_bz2_source
)
compress_options = json.dumps(compress_options)
template = COMPRESS.format(
communication_type=communication_type, compress_options=compress_options
)
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "compress")
self.test_etl.init_spec(
template=template, communication_type=communication_type
)
etl_decompressed_img = (
self.test_bck.object(self.test_image_bz2_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
etl_decompressed_txt = (
self.test_bck.object(self.test_text_bz2_filename)
.get(etl_name=self.test_etl.name)
.read_all()
.decode("utf-8")
)
elif algorithm == gzip:
self.test_bck.object(self.test_image_gz_filename).put_file(
self.test_image_gz_source
)
self.test_bck.object(self.test_text_gz_filename).put_file(
self.test_text_gz_source
)
compress_options = json.dumps(compress_options)
template = COMPRESS.format(
communication_type=communication_type, compress_options=compress_options
)
if self.git_test_mode == "true":
template = git_test_mode_format_image_tag_test(template, "compress")
self.test_etl.init_spec(
template=template, communication_type=communication_type
)
etl_decompressed_img = (
self.test_bck.object(self.test_image_gz_filename)
.get(etl_name=self.test_etl.name)
.read_all()
)
etl_decompressed_txt = (
self.test_bck.object(self.test_text_gz_filename)
.get(etl_name=self.test_etl.name)
.read_all()
.decode("utf-8")
)
else:
raise ValueError("Unexpected compression algorithm")
with open(self.test_image_source, "rb") as file:
original_image_content = file.read()
with open(self.test_text_source, "r", encoding="utf-8") as file:
original_text_content = file.read()
self.assertEqual(original_image_content, etl_decompressed_img)
self.assertEqual(original_text_content, etl_decompressed_txt)
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_default_compress_hpull(self):
self._compress_test_helper(ETL_COMM_HPULL, {})
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_default_compress_hpush(self):
self._compress_test_helper(ETL_COMM_HPUSH, {})
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_default_compress_hrev(self):
self._compress_test_helper(ETL_COMM_HREV, {})
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_gzip_compress_hpull(self):
self._compress_test_helper(ETL_COMM_HPULL, {"compression": "gzip"})
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_gzip_compress_hpush(self):
self._compress_test_helper(ETL_COMM_HPUSH, {"compression": "gzip"})
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_gzip_compress_hrev(self):
self._compress_test_helper(ETL_COMM_HREV, {"compression": "gzip"})
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_bz2_compress_hpull(self):
self._compress_test_helper(ETL_COMM_HPULL, {"compression": "bz2"})
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_bz2_compress_hpush(self):
self._compress_test_helper(ETL_COMM_HPUSH, {"compression": "bz2"})
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_bz2_compress_hrev(self):
self._compress_test_helper(ETL_COMM_HREV, {"compression": "bz2"})
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_gzip_decompress_hpull(self):
self._decompress_test_helper(
ETL_COMM_HPULL, {"mode": "decompress", "compression": "gzip"}
)
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_gzip_decompress_hpush(self):
self._decompress_test_helper(
ETL_COMM_HPUSH, {"mode": "decompress", "compression": "gzip"}
)
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_gzip_decompress_hrev(self):
self._decompress_test_helper(
ETL_COMM_HREV, {"mode": "decompress", "compression": "gzip"}
)
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_bz2_decompress_hpull(self):
self._decompress_test_helper(
ETL_COMM_HPULL, {"mode": "decompress", "compression": "bz2"}
)
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_bz2_decompress_hpush(self):
self._decompress_test_helper(
ETL_COMM_HPUSH, {"mode": "decompress", "compression": "bz2"}
)
@unittest.skipIf(
os.getenv("COMPRESS_ENABLE", "true") == "false", "COMPRESS is disabled"
)
def test_bz2_decompress_hrev(self):
self._decompress_test_helper(
ETL_COMM_HREV, {"mode": "decompress", "compression": "bz2"}
)
| ais-etl-master | transformers/tests/test_compress.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring
import os
import unittest
from tests.utils import generate_random_str
from aistore import Client
class TestBase(unittest.TestCase):
def setUp(self):
self.endpoint = os.environ.get("AIS_ENDPOINT", "http://192.168.49.2:8080")
self.git_test_mode = os.getenv("GIT_TEST", "false")
self.client = Client(self.endpoint)
self.test_bck = self.client.bucket("test-bucket" + generate_random_str()).create(exist_ok=True)
self.test_etl = self.client.etl("test-etl-" + generate_random_str())
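# Illustrative environment (assumed values; the defaults above apply when the variables are unset):
#   AIS_ENDPOINT=http://192.168.49.2:8080   # AIStore proxy the tests talk to
#   GIT_TEST=true                           # pins transformer images to the ":test" tag in the tests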
def tearDown(self):
self.test_bck.delete()
self.test_etl.stop()
self.test_etl.delete()
| ais-etl-master | transformers/tests/base.py |
"""
Transforming images with the torchvision API using the FastAPI framework and the Gunicorn and Uvicorn web servers.
Steps to run:
$ # with uvicorn
$ uvicorn main:app --reload
$ # with multiple uvicorn processes managed by gunicorn
$ gunicorn main:app --workers 4 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8000
Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
# pylint: disable=missing-class-docstring, missing-function-docstring, missing-module-docstring, broad-exception-caught
import os
import urllib.parse
import json
import io
from fastapi import FastAPI, Request, Depends, Response
import aiohttp # async
from PIL import Image
from torchvision import transforms
app = FastAPI()
host_target = os.environ["AIS_TARGET_URL"]
transform_format = os.environ["FORMAT"]
transform_json = os.environ["TRANSFORM"]
transform_dict = json.loads(transform_json)
# Create a list to hold the transformations
transform_list = []
# Add each transformation to the list
for transform_name, params in transform_dict.items():
# Get the transform class from torchvision.transforms
transform_class = getattr(transforms, transform_name)
# Create an instance of the transform class with the specified parameters
transform_instance = transform_class(**params)
# Add the transform instance to the list
transform_list.append(transform_instance)
# Combine the transformations into a single transform
transform = transforms.Compose(transform_list)
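# Illustrative configuration (assumed deployment values, mirroring the test suite): with
#   FORMAT='JPEG'
#   TRANSFORM='{"Resize": {"size": [100, 100]}, "Grayscale": {"num_output_channels": 1}}'
# the loop above builds transforms.Compose([Resize(size=[100, 100]), Grayscale(num_output_channels=1)]).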
class HttpClient:
session: aiohttp.ClientSession = None
def start(self):
self.session = aiohttp.ClientSession()
async def stop(self):
await self.session.close()
self.session = None
def __call__(self) -> aiohttp.ClientSession:
assert self.session is not None
return self.session
http_client = HttpClient()
@app.on_event("startup")
async def startup():
http_client.start()
@app.get("/health")
async def health():
return b"Running"
async def transform_image(image_bytes: bytes) -> bytes:
# Convert bytes to PIL Image
image = Image.open(io.BytesIO(image_bytes))
# Convert the PIL image to a PyTorch tensor
tensor_transform = transforms.ToTensor()
tensor = tensor_transform(image)
# Apply the transformation
transformed_tensor = transform(tensor)
# Convert the transformed tensor back to a PIL image
pil_transform = transforms.ToPILImage()
transformed_image = pil_transform(transformed_tensor)
# Convert the PIL image back to bytes
byte_arr = io.BytesIO()
transformed_image.save(byte_arr, format=transform_format)
# Get the byte array
transformed_image_bytes = byte_arr.getvalue()
return transformed_image_bytes
@app.get("/")
@app.get("/{full_path:path}", response_class=Response)
async def get_handler(
full_path: str, client: aiohttp.ClientSession = Depends(http_client)
):
"""
Handles GET requests.
Retrieves the destination/name of the object from the URL or the full_path variable,
fetches the object from the AIS target based on the destination/name,
transforms the bytes, and returns the modified bytes.
"""
# Get destination/name of object from URL or from full_path variable
# Fetch object from AIS target based on the destination/name
# Transform the bytes
# Return the transformed bytes
object_path = urllib.parse.quote(full_path, safe="@")
object_url = f"{host_target}/{object_path}"
resp = await client.get(object_url)
body = await resp.read()
return Response(
content=await transform_image(body), media_type="application/octet-stream"
)
@app.put("/")
@app.put("/{full_path:path}", response_class=Response)
async def put_handler(request: Request):
"""
Handles PUT requests.
Reads bytes from the request, performs byte transformation,
and returns the modified bytes.
"""
# Read bytes from request (request.body)
# Transform the bytes
# Return the transformed bytes
body = await request.body()
return Response(
content=await transform_image(body), media_type="application/octet-stream"
)
| ais-etl-master | transformers/torchvision_preprocess/main.py |
#!/usr/bin/env python
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
import argparse
import io
import json
import logging
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import requests
from PIL import Image
from torchvision import transforms
host_target = os.environ["AIS_TARGET_URL"]
transform_format = os.environ["FORMAT"]
transform_json = os.environ["TRANSFORM"]
transform_dict = json.loads(transform_json)
# Create a list to hold the transformations
transform_list = []
# Add each transformation to the list
for transform_name, params in transform_dict.items():
# Get the transform class from torchvision.transforms
transform_class = getattr(transforms, transform_name)
# Create an instance of the transform class with the specified parameters
transform_instance = transform_class(**params)
# Add the transform instance to the list
transform_list.append(transform_instance)
# Combine the transformations into a single transform
transform = transforms.Compose(transform_list)
class Handler(BaseHTTPRequestHandler):
def log_request(self, code="-", size="-"):
# Don't log successful requests info. Unsuccessful logged by log_error().
pass
def _set_headers(self):
self.send_response(200)
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
def transform_image(self, image_bytes):
# Convert bytes to PIL Image
image = Image.open(io.BytesIO(image_bytes))
# Convert the PIL image to a PyTorch tensor
tensor_transform = transforms.ToTensor()
tensor = tensor_transform(image)
# Apply the transformation
transformed_tensor = transform(tensor)
# Convert the transformed tensor back to a PIL image
pil_transform = transforms.ToPILImage()
transformed_image = pil_transform(transformed_tensor)
# Convert the PIL image back to bytes
byte_arr = io.BytesIO()
transformed_image.save(byte_arr, format=transform_format)
# Get the byte array
transformed_image_bytes = byte_arr.getvalue()
return transformed_image_bytes
def do_PUT(self):
try:
content_length = int(self.headers["Content-Length"])
put_data = self.rfile.read(content_length)
self._set_headers()
self.wfile.write(self.transform_image(put_data))
except Exception as e:
logging.error("Error processing PUT request: %s", str(e))
self.send_response(500)
self.end_headers()
self.wfile.write(b"Data processing failed")
def do_GET(self):
try:
if self.path == "/health":
self._set_headers()
self.wfile.write(b"Running")
return
response = requests.get(host_target + self.path)
self._set_headers()
self.wfile.write(self.transform_image(response.content))
except Exception as e:
logging.error("Error processing GET request: %s", str(e))
self.send_response(500)
self.end_headers()
self.wfile.write(b"Data processing failed")
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def run(addr="localhost", port=8000):
server = ThreadedHTTPServer((addr, port), Handler)
print(f"Starting HTTP server on {addr}:{port}")
server.serve_forever()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a simple HTTP server")
parser.add_argument(
"-l",
"--listen",
default="localhost",
help="Specify the IP address on which the server listens",
)
parser.add_argument(
"-p",
"--port",
type=int,
default=8000,
help="Specify the port on which the server listens",
)
args = parser.parse_args()
run(addr=args.listen, port=args.port)
| ais-etl-master | transformers/torchvision_preprocess/http-multithreaded-server/server.py |
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from aistore.tf import Dataset
from aistore.tf.ops import Decode, Convert, Resize
EPOCHS = 5
BATCH_SIZE = 20
# ADJUST Dataset PARAMETERS BELOW
BUCKET_NAME = "tar-bucket"
PROXY_URL = "http://localhost:8080"
# Create Dataset.
# Values are extracted from tar-records according to the Resize(Convert(Decode("jpg"), tf.float32), (224, 224)) operations,
# meaning that the bytes under "jpg" in each tar-record are decoded as an image, converted to tf.float32, and then resized to (224, 224).
# Labels are extracted according to the "cls" selection, meaning that the bytes under "cls" are treated as the label.
conversions = [Decode("jpg"), Convert("jpg", tf.float32), Resize("jpg", (224, 224))]
selections = ["jpg", "cls"]
dataset = Dataset(BUCKET_NAME, PROXY_URL, conversions, selections)
# prepare your bucket first with tars (for instance gsutil ls gs://lpr-gtc2020)
train_dataset = dataset.load("train-{0..5}.tar", remote_exec=False,
num_workers=4).prefetch(EPOCHS * BATCH_SIZE).shuffle(buffer_size=1024).batch(BATCH_SIZE)
test_dataset = dataset.load("train-{5..10}.tar", remote_exec=False, num_workers=4).prefetch(BATCH_SIZE).batch(BATCH_SIZE)
# TRAINING PART BELOW
inputs = keras.Input(shape=(224, 224, 3), name="images")
x = layers.Flatten()(inputs)
x = layers.Dense(64, activation="relu", name="dense_1")(x)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.Adam(1e-4), loss=keras.losses.mean_squared_error, metrics=["acc"])
model.summary()
model.fit(train_dataset, epochs=EPOCHS)
result = model.evaluate(test_dataset)
print(dict(zip(model.metrics_names, result)))
| ais-etl-master | examples/imagenet_in_memory.py |
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from aistore.tf import Dataset, default_record_parser
from aistore.tf.ops import Select, Decode, Convert, Resize
def path_generator():
i = 1
while True:
yield "train.record-{}".format(i)
i += 1
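# Illustrative note: path_generator lazily yields "train.record-1", "train.record-2", ...,
# so the dataset.load() call below presumably gets a fresh output path for each TFRecord shard it writes
# (shard size capped by max_shard_size).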
EPOCHS = 10
BATCH_SIZE = 20
# ADJUST Dataset PARAMETERS BELOW
BUCKET_NAME = "tar-bucket"
PROXY_URL = "http://localhost:8080"
# Create Dataset.
# Values are extracted from tar-records according to the Resize(Convert(Decode("jpg"), tf.float32), (224, 224)) operations,
# meaning that the bytes under "jpg" in each tar-record are decoded as an image, converted to tf.float32, and then resized to (224, 224).
# Labels are extracted according to the Select("cls") operation, meaning that the bytes under "cls" are treated as the label.
dataset = Dataset(BUCKET_NAME, PROXY_URL, [Decode("jpg"), Convert("jpg", tf.float32), Resize("jpg", (224, 224))], [Select("jpg"), Select("cls")])
# prepare your bucket, for example from `gsutil ls gs://lpr-gtc2020`
# save multiple TFRecord files with max size 2MB to paths generated by path_generator
train_records_files = dataset.load("train-{0..3}.tar", path=path_generator, max_shard_size="2MB", num_workers=4)
# save TFRecord file to test.record path
dataset.load("train-{4..7}.tar", path="test.record", num_workers=4)
train_dataset = tf.data.TFRecordDataset(filenames=train_records_files)
train_dataset = train_dataset.map(default_record_parser)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(BATCH_SIZE)
test_dataset = tf.data.TFRecordDataset(filenames=["test.record"])
test_dataset = test_dataset.map(default_record_parser).batch(BATCH_SIZE)
# TRAINING PART BELOW
inputs = keras.Input(shape=(224, 224, 3), name="images")
x = layers.Flatten()(inputs)
x = layers.Dense(64, activation="relu", name="dense_1")(x)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.Adam(1e-4), loss=keras.losses.mean_squared_error, metrics=["acc"])
model.summary()
model.fit(train_dataset, epochs=EPOCHS)
result = model.evaluate(test_dataset)
print(dict(zip(model.metrics_names, result)))
dataset.stop()
| ais-etl-master | examples/imagenet_from_disk.py |
import os
import re
import sys
import platform
import subprocess
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
def maybe_install_c_libs():
if sys.platform == 'linux':
cmd = 'sudo apt-get update'
subprocess.check_call(cmd.split())
cmd = 'sudo apt-get install libzmq3-dev libzmq5'
subprocess.check_call(cmd.split())
'''
Build the latest version 3.1.1 from git using cmake.
apt-get installs the old 0.5.7 version, which cannot build this code.
see: https://packages.ubuntu.com/source/xenial/msgpack
'''
# cmd = 'sudo apt-get install libmsgpack-dev'
cmd = './requirements.sh'
subprocess.check_call(cmd.split())
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
#Reference: https://github.com/pybind/cmake_example/blob/master/setup.py
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
build_args = []
if platform.system() != "Windows":
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
print() # Add an empty line for cleaner output.
project_name = 'nvzmq_ops'
__version__ = '0.0.1'
REQUIRED_PACKAGES = [
'tensorflow >= 1.12.0',
]
maybe_install_c_libs()
setup(
name=project_name,
version=__version__,
description=('The TensorFlow custom zmq op'),
author='Nvidia',
install_requires=REQUIRED_PACKAGES,
packages=['nvzmq_ops'],
# Set extension name with top_level dir, otherwise it will be copied to pip dist-packages dir.
# Alternative solution: pre build a nvzmq_ops.so and include it into MANIFEST.in.
ext_modules=[CMakeExtension('nvzmq_ops/nvzmq_ops')],
cmdclass=dict(build_ext=CMakeBuild),
include_package_data=True,
zip_safe=False,
license='Apache-2.0',
) | dlinput-tf-master | setup.py |
import time
import threading
import os
import numpy as np
import zmq
import msgpack
import tensorflow as tf
from tensorflow.python.platform import test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import resources
zmq_module = tf.load_op_library('./build/nvzmq_ops/kernel/nvzmq_ops.so')
zmq_op = zmq_module.nv_zmq
zmq_conn_handle = zmq_module.zmq_conn_handle
allowable_dtypes = {"uint8", "uint16", "int16", "int32", "float16", "float32", "float64"}
TADDR_ARGS = 'zrpull://127.0.0.1:5678'
ZMQ_HWM = 100
class TestZMQResourceHandle(test.TestCase):
def test_simple(self):
with self.session():
TADDR_VALID = 'zrpull://127.0.0.1:5555'
output = zmq_conn_handle(TADDR_VALID, ZMQ_HWM, 0)
resources.initialize_resources(resources.local_resources()).run()
# assertDtypeEqual does not work for the resource type: it converts tf.dtype to np.dtype, and resource is incompatible with numpy
#self.assertDtypeEqual(output, dtypes.resource.as_numpy_type)
self.assertEqual(type(output.dtype), type(dtypes.resource))
def test_invalid_address_type(self):
INVALID_ADDR = 'localhost:8089'
with self.assertRaises(tf.errors.InvalidArgumentError):
with self.session():
zmq_conn_handle(INVALID_ADDR, ZMQ_HWM, 0).eval()
class TestZMQOpArguments(test.TestCase):
def test_no_arguments(self):
with self.assertRaises(TypeError):
zmq_op()
def test_invalid_type_format(self):
with self.assertRaises(TypeError):
zmq_op(handle=zmq_conn_handle(address=TADDR_ARGS, zmq_hwm=ZMQ_HWM, zmq_buff=0), types=tf.int32)
def test_invalid_type_length(self):
with self.assertRaises(ValueError):
zmq_op(handle=zmq_conn_handle(address=TADDR_ARGS, zmq_hwm=ZMQ_HWM, zmq_buff=0), types=[])
def test_invalid_output_type(self):
with self.assertRaises(TypeError):
zmq_op(handle=zmq_conn_handle(address=TADDR_ARGS, zmq_hwm=ZMQ_HWM, zmq_buff=0), types=[tf.bool])
def test_valid_arguments(self):
zmq_layer = zmq_op(handle=zmq_conn_handle(address=TADDR_ARGS, zmq_hwm=ZMQ_HWM, zmq_buff=0), types=[tf.int32, tf.float32])
self.assertEqual(len(zmq_layer), 2)
self.assertEqual(type(zmq_layer[0]), tf.Tensor)
self.assertEqual(type(zmq_layer[1]), tf.Tensor)
self.assertEqual(zmq_layer[0].dtype, tf.int32)
self.assertEqual(zmq_layer[1].dtype, tf.float32)
self.assertEqual(zmq_layer[0].shape, tf.TensorShape(None))
self.assertEqual(zmq_layer[1].shape, tf.TensorShape(None))
class TestZMQOpParse(test.TestCase):
def send_msgs(socket, msgs, multipart = False):
if multipart:
socket.send_multipart(msgs)
else:
for msg in msgs:
socket.send(msg)
time.sleep(len(msg) / 1000)
# dlinput
def np2dict(a, parts=None, allow_float64=False):
"""Recursively convert numpy tensors in data structures to dictionaries."""
if isinstance(a, np.ndarray):
assert allow_float64 or a.dtype != np.dtype("float64")
dtype = str(a.dtype)
assert dtype in allowable_dtypes, dtype
if parts is None:
return dict(_shape=list(a.shape),
_dtype=dtype,
_data=a.tobytes())
else:
index = len(parts)
parts.append(a.tobytes())
return dict(_shape=list(a.shape),
_dtype=dtype,
_part=index)
elif isinstance(a, list):
return [TestZMQOpParse.np2dict(x, parts) for x in a]
elif isinstance(a, dict):
return {k: TestZMQOpParse.np2dict(v, parts) for k,v in a.items()}
else:
return a
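# Illustrative example (single-part case, parts=None):
#   np2dict(np.zeros((2, 2), dtype=np.uint8))
# returns {"_shape": [2, 2], "_dtype": "uint8", "_data": b"\x00\x00\x00\x00"},
# which msgpack.dumps() serializes before sending on the PUSH socket.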
def test_corrupt_msg_pack_data(self):
CORRUPT_ADDR = 'zrpull://127.0.0.1:5555'
TSENDER_ADDR_CORRUPT = 'tcp://127.0.0.1:5555'
ctx = zmq.Context(1)
socket = ctx.socket(zmq.PUSH)
try:
socket.bind(TSENDER_ADDR_CORRUPT)
tensor_msg = msgpack.packb([['garbage data']], use_bin_type=True)
thread = self.checkedThread(target=TestZMQOpParse.send_msgs, args=(socket, [tensor_msg]))
thread.start()
with self.assertRaises(tf.errors.DataLossError):
with self.session() as sess:
zmq_op(handle=zmq_conn_handle(address=CORRUPT_ADDR, zmq_hwm=ZMQ_HWM, zmq_buff=0), types=[tf.int32])[0].eval()
except Exception as e:
self.fail()
finally:
thread.join()
socket.close()
ctx.term()
'''
These tests require a receive timeout; if no timeout is configured, keep the following two timeout tests commented out.
'''
# def test_timeout(self):
# TADDR_VALID = 'zrpull://127.0.0.1:5555'
# output = zmq_op(handle=zmq_conn_handle(), address=TADDR_VALID, types=[tf.float32, tf.int32])
# with self.assertRaises(tf.errors.CancelledError):
# with tf.Session() as sess:
# sess.run(output)
# def test_timeout_multithread(self):
# TADDR_VALID = 'zrpull://127.0.0.1:5555'
# handle = zmq_conn_handle()
# ops = []
# for i in range(2):
# ops.extend(zmq_op(handle=handle, address=TADDR_VALID, types=[tf.float32, tf.int32]))
# with self.assertRaises(tf.errors.CancelledError):
# with self.session() as sess:
# sess.run(tf.tuple(ops))
def test_single_op_valid(self):
TADDR_VALID = 'zrpull://127.0.0.1:5555'
TSENDER_ADDR_VALID = 'tcp://127.0.0.1:5555'
SINGLE_DATA = [44]
ctx = zmq.Context(1)
socket = ctx.socket(zmq.PUSH)
try:
socket.bind(TSENDER_ADDR_VALID)
tensor_data1 = np.arange(16, dtype=np.uint8).reshape((4,4))
tensor_data2 = np.array(SINGLE_DATA, dtype=np.int32)
tensor_data_list = [tensor_data1, tensor_data2]
packed = msgpack.dumps(TestZMQOpParse.np2dict(tensor_data_list))
thread = self.checkedThread(target=TestZMQOpParse.send_msgs, args=(socket, [packed]))
thread.start()
tensors = zmq_op(handle=zmq_conn_handle(address=TADDR_VALID, zmq_hwm=ZMQ_HWM, zmq_buff=0), types=[tf.uint8, tf.int32])
with self.session() as sess:
outputs = sess.run(tensors)
self.assertEqual(len(outputs), 2)
self.assertTrue(np.array_equal(outputs[0], np.arange(16, dtype=np.float32).reshape(4,4)))
self.assertTrue(np.array_equal(outputs[1], np.array(SINGLE_DATA, dtype=np.int32)))
except Exception as e:
self.fail()
finally:
thread.join()
socket.close()
ctx.term()
def test_multithread(self):
TADDR_VALID = 'zrpull://127.0.0.1:5556'
TSENDER_ADDR_VALID = 'tcp://127.0.0.1:5556'
NUM_THREAD= 4
try:
ctx = zmq.Context(1)
socket = ctx.socket(zmq.PUSH)
socket.bind(TSENDER_ADDR_VALID)
msgs = []
expected = []
for i in range(1, NUM_THREAD + 1):
tensor_data1 = np.arange(i*i, dtype=np.float32).reshape((i*i))
tensor_data2 = np.array([i], dtype=np.int32)
tensor_data3 = np.array([i], dtype=np.uint8)
tensor_data_list = [tensor_data1, tensor_data2, tensor_data3]
expected.append(tensor_data_list)
packed = msgpack.dumps(TestZMQOpParse.np2dict(tensor_data_list))
msgs.append(packed)
thread = self.checkedThread(target=TestZMQOpParse.send_msgs, args=(socket, msgs))
thread.start()
handle = zmq_conn_handle(address=TADDR_VALID, zmq_hwm=ZMQ_HWM, zmq_buff=0)
tensor_lists = []
for i in range(NUM_THREAD):
tensors = zmq_op(handle=handle, types=[tf.float32, tf.int32, tf.uint8])
tensor_lists.append(tensors)
with self.session() as sess:
# Writing a graph on tensorboard
# cwd = os.getcwd()
# writer = tf.summary.FileWriter(cwd + '/tfboardlogs/', sess.graph)
output = sess.run(tensor_lists)
self.assertEqual(len(output), len(expected))
output.sort(key=lambda x: (x[0].shape[0]))
for a, b in zip(output, expected):
for x, y in zip(a, b):
self.assertAllEqual(x, y)
# writer.close()
except Exception as e:
self.fail()
finally:
thread.join()
socket.close()
ctx.term()
def test_multipart(self):
TADDR_VALID = 'zrpull://127.0.0.1:5555'
TSENDER_ADDR_VALID = 'tcp://127.0.0.1:5555'
SINGLE_DATA = [44]
ctx = zmq.Context(1)
socket = ctx.socket(zmq.PUSH)
try:
socket.bind(TSENDER_ADDR_VALID)
tensor_data1 = np.arange(16, dtype=np.uint8).reshape((4,4))
tensor_data2 = np.array(SINGLE_DATA, dtype=np.int32)
tensor_data_list = [tensor_data1, tensor_data2]
parts = [None]
packed = msgpack.dumps(TestZMQOpParse.np2dict(tensor_data_list, parts))
parts[0] = packed
thread = self.checkedThread(target=TestZMQOpParse.send_msgs, args=(socket, parts, True))
thread.start()
tensors = zmq_op(handle=zmq_conn_handle(address=TADDR_VALID, zmq_hwm=ZMQ_HWM, zmq_buff=0), types=[tf.uint8, tf.int32])
with self.session() as sess:
outputs = sess.run(tensors)
self.assertEqual(len(outputs), 2)
self.assertTrue(np.array_equal(outputs[0], np.arange(16, dtype=np.float32).reshape(4,4)))
self.assertTrue(np.array_equal(outputs[1], np.array(SINGLE_DATA, dtype=np.int32)))
except Exception as e:
self.fail()
finally:
thread.join()
socket.close()
ctx.term()
if __name__ == '__main__':
test.main()
| dlinput-tf-master | test/zmq_ops_test.py |
import math
import time
import argparse
import numpy as np
import tensorflow as tf
from nvzmq_ops import ZmqOp
from tensorcom.zcom import Statistics
from math import inf
parser = argparse.ArgumentParser(description='Performance test')
parser.add_argument("-addr", "--address", type=str, default="zsub://127.0.0.1:7880")
parser.add_argument("-n", "--num", type=int, default=100)
args = parser.parse_args()
num = args.num
class Stats(object):
def __init__(self):
self.lo = inf
self.hi = -inf
self.sx = 0
self.sx2 = 0
self.n = 0
def __iadd__(self, x):
self.lo = min(self.lo, np.amin(x))
self.hi = max(self.hi, np.amax(x))
self.sx += np.sum(x)
self.sx2 += np.sum(x**2)
self.n += x.size
return self
def summary(self):
return "[{:.3g} {:.3g}] mean={:.3g} std={:.3g} n={:d}".format(
self.lo, self.hi,
self.sx/self.n,
(self.sx2/self.n - (self.sx/self.n)**2)**.5,
self.n
)
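# Illustrative usage of Stats (values below are examples only):
#   s = Stats()
#   s += np.arange(10, dtype=np.float32)
#   print(s.summary())  # prints the value range, mean, std and sample count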
def tf_client():
zmq_op = ZmqOp(address=args.address, zmq_hwm=100)
types = [tf.dtypes.as_dtype(np.dtype('float16')), tf.dtypes.as_dtype(np.dtype('int32'))]
# Define static node and graph before for-loop
tensor_op = zmq_op.pull(types)
with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
shapes = set()
stats = Stats()
        throughput_stats = Statistics(1000)
count = 0
total = 0
start = time.time()
for i in range(num):
outs = sess.run(tensor_op)
            throughput_stats.add(sum([x.nbytes for x in outs]))
count += 1
a = outs[0]
total += len(a)
shapes.add((str(a.dtype),) + tuple(a.shape))
            # Accumulating stats below is compute-heavy; comment out the next
            # line to see the raw throughput go up.
stats += a.astype(np.float32)
finish = time.time()
print("{} batches, {} samples".format(count, total))
print("{:.3g} s per batch, {:.3g} s per sample".format(
(finish - start)/count, (finish - start)/total))
print("shapes:", shapes)
print(stats.summary())
        print(throughput_stats.summary())
tf_client() | dlinput-tf-master | example/perf_test.py |
from .zmq_ops import ZmqOp
| dlinput-tf-master | nvzmq_ops/__init__.py |
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
zmq_ops = load_library.load_op_library(
resource_loader.get_path_to_datafile('nvzmq_ops.so'))
zmq_conn_handle = zmq_ops.zmq_conn_handle
zmq_op = zmq_ops.nv_zmq
'''
TODO: update when kernel changes.
'''
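# Illustrative use of the raw ops loaded above; the address and dtypes are
# examples only, and `tensorflow as tf` is assumed to be imported by the caller:
#   handle = zmq_conn_handle(address='zrpull://127.0.0.1:5555', zmq_hwm=100, zmq_buff=0)
#   tensors = zmq_op(handle=handle, types=[tf.uint8, tf.int32])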
class ZmqOp(object):
def __init__(self, address, zmq_hwm=0, zmq_buff=0):
self._zmq_conn_handle = zmq_conn_handle(address, zmq_hwm, zmq_buff)
self._address = address
@property
def address(self):
return self._address
def pull(self, types):
return zmq_op(handle=self._zmq_conn_handle, types=types) | dlinput-tf-master | nvzmq_ops/zmq_ops.py |
"""
greenflowlab setup
"""
import json
import os
from jupyter_packaging import (
create_cmdclass, install_npm, ensure_targets,
combine_commands, skip_if_exists
)
import setuptools
HERE = os.path.abspath(os.path.dirname(__file__))
# The name of the project
name="greenflowlab"
# Get our version
with open(os.path.join(HERE, 'package.json')) as f:
version = json.load(f)['version']
lab_path = os.path.join(HERE, name, "labextension")
# Representative files that should exist after a successful build
jstargets = [
os.path.join(lab_path, "package.json"),
]
package_data_spec = {
name: [
"*"
]
}
labext_name = "greenflowlab"
data_files_spec = [
("share/jupyter/labextensions/%s" % labext_name, lab_path, "**"),
("share/jupyter/labextensions/%s" % labext_name, HERE, "install.json"),("etc/jupyter/jupyter_server_config.d",
"jupyter-config", "greenflowlab.json"),
]
cmdclass = create_cmdclass("jsdeps",
package_data_spec=package_data_spec,
data_files_spec=data_files_spec
)
js_command = combine_commands(
install_npm(HERE, build_cmd="build:prod", npm=["jlpm"]),
ensure_targets(jstargets),
)
is_repo = os.path.exists(os.path.join(HERE, ".git"))
if is_repo:
cmdclass["jsdeps"] = js_command
else:
cmdclass["jsdeps"] = skip_if_exists(jstargets, js_command)
with open("README.md", "r") as fh:
long_description = fh.read()
setup_args = dict(
name=name,
version=version,
url="https://github.com/NVIDIA/fsi-samples/tree/main/greenflowlab",
author="{'name': 'Yi Dong', 'email': '[email protected]'}",
description="greenflow Jupyterlab extension",
long_description= long_description,
long_description_content_type="text/markdown",
cmdclass= cmdclass,
packages=setuptools.find_packages(),
install_requires=[
"jupyterlab>=3.0.0rc13,==3.*",
"ipywidgets",
"greenflow"
],
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
license="Apache",
platforms="Linux, Mac OS X, Windows",
keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Framework :: Jupyter",
],
)
if __name__ == "__main__":
setuptools.setup(**setup_args)
| fsi-samples-main | greenflowlab/setup.py |
#!/usr/bin/env python
# coding: utf-8
"""
TODO: Add module docstring
"""
import ipywidgets.widgets as widgets
import ipywidgets
from ipywidgets import DOMWidget
from traitlets import Unicode, List, Dict, Instance
from ._frontend import module_name, module_version
class GreenflowWidget(DOMWidget):
"""TODO: Add docstring here
"""
_model_name = Unicode('GreenflowModel').tag(sync=True)
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
_view_name = Unicode('GreenflowView').tag(sync=True)
_view_module = Unicode(module_name).tag(sync=True)
_view_module_version = Unicode(module_version).tag(sync=True)
value = List().tag(sync=True)
cache = Dict().tag(sync=True)
sub = Instance(widgets.Widget).tag(sync=True,
**widgets.widget_serialization)
def __init__(self):
self.sub = ipywidgets.HBox()
super().__init__()
self.on_msg(self._handle_event)
def _handle_event(self, _, content, buffers):
if content.get('event', '') == 'run':
self.run()
elif content.get('event', '') == 'clean':
self.task_graph.run_cleanup(ui_clean=True)
self.sub = ipywidgets.HBox()
def set_taskgraph(self, task_graph):
self.task_graph = task_graph
def set_state(self, sync_data):
super().set_state(sync_data)
self.task_graph.reset()
self.task_graph.extend(sync_data['value'])
def run(self):
result = self.task_graph.run(formated=True)
self.sub = result
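# Illustrative notebook usage (assumes `task_graph` is a built greenflow TaskGraph):
#   widget = GreenflowWidget()
#   widget.set_taskgraph(task_graph)
#   widget  # displaying it lets the frontend trigger the 'run'/'clean' events above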
| fsi-samples-main | greenflowlab/greenflowlab/greenflowmodel.py |
__all__ = ['__version__']
def _fetchVersion():
import json
import os
HERE = os.path.abspath(os.path.dirname(__file__))
for d, _, _ in os.walk(HERE):
try:
with open(os.path.join(d, 'package.json')) as f:
return json.load(f)['version']
except FileNotFoundError:
pass
raise FileNotFoundError('Could not find package.json under dir {}'.format(HERE))
__version__ = _fetchVersion()
| fsi-samples-main | greenflowlab/greenflowlab/_version.py |
#!/usr/bin/env python
# coding: utf-8
"""
Information about the frontend package of the widgets.
"""
module_name = "greenflowlab"
module_version = "^0.1.0"
| fsi-samples-main | greenflowlab/greenflowlab/_frontend.py |
import os
import json
import tornado
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
from greenflow.dataframe_flow import TaskGraph
from greenflow.dataframe_flow.taskGraph import add_module_from_base64
from greenflow.dataframe_flow.config_nodes_modules import (
get_greenflow_config_modules, load_modules)
from .server_utils import (get_nodes, add_nodes)
try:
# For python 3.8 and later
import importlib.metadata as importlib_metadata
except ImportError:
# prior to python 3.8 need to install importlib-metadata
import importlib_metadata
class RouteHandlerLoadGraph(APIHandler):
@tornado.web.authenticated
def get(self):
self.finish("abcde")
@tornado.web.authenticated
def post(self):
        # input_data is the task-graph specification sent from the frontend
input_data = self.get_json_body()
task_graph = TaskGraph(input_data)
# import pudb
# pudb.set_trace()
nodes_and_edges = get_nodes(task_graph)
self.finish(json.dumps(nodes_and_edges))
class RouteHandlerLoadGraphFromPath(APIHandler):
@tornado.web.authenticated
def post(self):
        # input_data is a dictionary with a key "path"
input_data = self.get_json_body()
task_graph = TaskGraph.load_taskgraph(input_data['path'])
nodes_and_edges = get_nodes(task_graph)
self.finish(json.dumps(nodes_and_edges))
class RouteHandlerPlugins(APIHandler):
@tornado.web.authenticated
def get(self):
# load all the plugin information from the backend
modules = get_greenflow_config_modules()
client_info = {}
client_info['validation'] = {}
client_info['display'] = {}
for key in modules.keys():
if os.path.isdir(modules[key]):
mod = load_modules(modules[key])
# if hasattr(mod.mod, 'client'):
client_mod = mod.mod
if hasattr(client_mod, 'validation'):
val_dict = getattr(client_mod, 'validation')
client_info['validation'].update(val_dict)
else:
pass
# print(client_mod, 'no validation')
if hasattr(client_mod, 'display'):
val_dict = getattr(client_mod, 'display')
client_info['display'].update(val_dict)
else:
pass
# print(client_mod, 'no display')
# else:
# print(key, mod.mod, 'no client')
# load all the plugins from entry points
for entry_point in importlib_metadata.entry_points().get(
'greenflow.plugin', ()):
client_mod = entry_point.load()
if hasattr(client_mod, 'validation'):
val_dict = getattr(client_mod, 'validation')
client_info['validation'].update(val_dict)
else:
pass
# print(client_mod, 'no validation')
if hasattr(client_mod, 'display'):
val_dict = getattr(client_mod, 'display')
client_info['display'].update(val_dict)
else:
pass
# print(client_mod, 'no display')
self.finish(json.dumps(client_info))
class RouteHandlerRegister(APIHandler):
@tornado.web.authenticated
def post(self):
from .server_utils import register_node
        # input_data is a dictionary with keys "module" and "class"
input_data = self.get_json_body()
module_name = input_data['module']
class_str = input_data['class']
class_obj = add_module_from_base64(module_name, class_str)
register_node(module_name, class_obj)
self.finish(json.dumps(class_obj.__name__))
class RouteHandlerLoadAllNodes(APIHandler):
@tornado.web.authenticated
def get(self):
        # return all the nodes that can be added in the client
result = add_nodes()
self.finish(json.dumps(result))
def setup_handlers(web_app):
host_pattern = ".*$"
base_url = web_app.settings["base_url"]
# pass the jupyterlab server root directory to
# environment variable `GREENFLOWROOT`. Note, this
# variable is not meant to be overwritten by user.
# This variable can be used by other utility function
# to compute the absolute path of the files.
os.environ['GREENFLOWROOT'] = os.getcwd()
# load all the graphs given the input gq.yaml file contents
route_pattern0 = url_path_join(base_url, "greenflowlab", "load_graph")
route_pattern1 = url_path_join(base_url, "greenflowlab", "all_nodes")
route_pattern2 = url_path_join(base_url, "greenflowlab", "load_graph_path")
route_pattern3 = url_path_join(base_url, "greenflowlab", "register_node")
route_pattern4 = url_path_join(base_url, "greenflowlab", "register_plugins")
handlers = [(route_pattern0, RouteHandlerLoadGraph),
(route_pattern1, RouteHandlerLoadAllNodes),
(route_pattern2, RouteHandlerLoadGraphFromPath),
(route_pattern3, RouteHandlerRegister),
(route_pattern4, RouteHandlerPlugins)]
web_app.add_handlers(host_pattern, handlers)
| fsi-samples-main | greenflowlab/greenflowlab/handlers.py |
import json
import os.path as osp
from ._version import __version__
HERE = osp.abspath(osp.dirname(__file__))
with open(osp.join(HERE, 'labextension', 'package.json')) as fid:
data = json.load(fid)
def _jupyter_labextension_paths():
return [{
'src': 'labextension',
'dest': data['name']
}]
from .handlers import setup_handlers
def _jupyter_server_extension_points():
return [{
"module": "greenflowlab"
}]
def _load_jupyter_server_extension(server_app):
"""Registers the API handler to receive HTTP requests from the frontend extension.
Parameters
----------
    server_app: jupyterlab.labapp.LabApp
JupyterLab application instance
"""
setup_handlers(server_app.web_app)
server_app.log.info("Registered greenflowLab extension at URL path /greenflowlab")
| fsi-samples-main | greenflowlab/greenflowlab/__init__.py |
import inspect
import uuid
from greenflow.dataframe_flow import TaskGraph
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.task import Task
from greenflow.dataframe_flow.output_collector_node import (
OUTPUT_TYPE, OUTPUT_ID)
from greenflow.dataframe_flow import (TaskSpecSchema, PortsSpecSchema)
from greenflow.dataframe_flow.config_nodes_modules import (
load_modules, get_greenflow_config_modules, get_node_tgraphmixin_instance)
import greenflow.plugin_nodes as plugin_nodes
try:
# For python 3.8 and later
import importlib.metadata as importlib_metadata
except ImportError:
# prior to python 3.8 need to install importlib-metadata
import importlib_metadata
from pathlib import Path
DYNAMIC_MODULES = {}
def register_node(module, classObj):
if module not in DYNAMIC_MODULES:
container = {}
DYNAMIC_MODULES[module] = container
else:
container = DYNAMIC_MODULES[module]
# Snippet below is important so that dynamic modules appear under the
# "Add Nodes" menu.
key = classObj.__name__
container[key] = classObj
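# Illustrative use of register_node; `MyCustomNode` is a hypothetical Node subclass:
#   register_node('my_plugin_module', MyCustomNode)
# The class then shows up under the "Add Nodes" menu for that module name.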
def _format_port(port):
"""
compute the right port type str
Arguments
-------
port: input/output port object
Returns
-------
list
a list of ports with name and type
"""
dynamic = PortsSpecSchema.dynamic
all_ports = []
for key in port:
one_port = {}
one_port['name'] = key
if dynamic in port[key]:
one_port[dynamic] = port[key][dynamic]
port_type = port[key][PortsSpecSchema.port_type]
if isinstance(port_type, list):
types = []
for t in port_type:
type_names = [e.__module__+'.'+e.__name__ for
e in t.mro()]
types.append(type_names)
one_port['type'] = types
else:
type_name = [e.__module__+'.'+e.__name__ for
e in port_type.mro()]
one_port['type'] = [type_name]
all_ports.append(one_port)
return all_ports
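# Illustrative shape of the value returned by _format_port (type names come from
# the mro of each port type, so the concrete entries depend on the node):
#   [{'name': 'in0', 'type': [['some.module.SomeType', 'builtins.object']]}, ...]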
def get_nodes(task_graph):
"""
It is a private function taking an input task graph. It will run the
column flow and compute the column names and types for all the nodes.
It returns a dict which has two keys.
nodes:
- list of node objects for the UI client. It contains all the
necessary information about the node including the size of the node
input ports, output ports, output meta names/types,
conf schema and conf data.
edges:
        - list of edge objects for the UI client. It enumerates all the
edges in the graph.
Arguments
-------
task_graph: TaskGraph
taskgraph object
Returns
-------
dict
nodes and edges of the graph data
"""
for task in task_graph:
if (task.get(TaskSpecSchema.node_type) == OUTPUT_TYPE):
# Setting output collector ID should not be needed.
task._task_spec[TaskSpecSchema.task_id] = OUTPUT_ID
# task._task_spec[TaskSpecSchema.node_type] = Output_Collector
task_graph.build()
nodes = []
edges = []
for task in task_graph:
node = task_graph[task[TaskSpecSchema.task_id]]
out_node = get_labnode_obj(node)
connection_inputs = task.get('inputs')
nodes.append(out_node)
# out_node['output_meta'] = task_graph[node.uid].output_meta
for port, v in connection_inputs.items():
edge = {"from": v, "to": node.uid+"."+port}
edges.append(edge)
# fix the output collector inputs
if (task[TaskSpecSchema.task_id] == OUTPUT_ID):
inputs = []
num = 0
for port, v in connection_inputs.items():
inputs.append({'name': port, "type": [["any"]]})
num = max(int(port[2:]), num)
inputs.append({'name': 'in'+str(num+1), "type": [["any"]]})
out_node['inputs'] = inputs
task_graph.run_cleanup()
return {'nodes': nodes, 'edges': edges}
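# The returned edges reference "task_id.port" on both ends, e.g. (illustrative):
#   {'from': 'node_a.out0', 'to': 'node_b.in0'}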
def init_get_labnode_obj(node, count_id=True):
node.init()
node.update()
return get_labnode_obj(node, count_id=count_id)
def get_labnode_obj(node, count_id=True):
"""
It is a private function to convert a Node instance into a dictionary for
client to consume.
Arguments
-------
node: Node
greenflow Node
Returns
-------
dict
node data for client
"""
ports = node.ports_setup()
metadata = node.meta_setup()
schema = node.conf_schema()
typeName = node._task_obj.get('type')
if node.uid == OUTPUT_ID:
width = 160
typeName = OUTPUT_TYPE
else:
if count_id:
width = max(max(len(node.uid), len(typeName)) * 10, 100)
else:
width = max(len(typeName) * 10, 100)
conf = node._task_obj.get('conf')
out_node = {'width': width,
'id': node.uid,
'type': typeName,
'schema': schema.json,
'ui': schema.ui,
'conf': conf,
'inputs': _format_port(ports.inports),
'outputs': _format_port(ports.outports)}
out_node['output_meta'] = metadata.outports
if node._task_obj.get('filepath'):
out_node['filepath'] = node._task_obj.get('filepath')
if node._task_obj.get('module'):
out_node['module'] = node._task_obj.get('module')
out_node['required'] = metadata.inports
return out_node
def get_nodes_from_file(file):
"""
Given an input yaml file string. It returns a dict which has two keys.
nodes:
- list of node objects for the UI client. It contains all the
necessary information about the node including the size of the node
input ports, output ports, output meta names/types,
conf schema and conf data.
edges:
        - list of edge objects for the UI client. It enumerates all the
edges in the graph.
Arguments
-------
file: string
file name
Returns
-------
dict
nodes and edges of the graph data
"""
task_graph = TaskGraph.load_taskgraph(file)
return get_nodes(task_graph)
def add_nodes():
"""
It will load all the nodes for the UI client so user can add new node
to the graph. The nodes are from two sources: default greenflow nodes and
customized node modules.
The output is a dictionary whose keys are module names and values are a
list of the nodes inside that module.
Arguments
-------
Returns
-------
dict
dictionary of all the nodes that can be added in the client
"""
loaded_node_classes = []
all_modules = get_greenflow_config_modules()
# print('Greenflow config modules: {}\n'.format(all_modules))
all_nodes = {}
# not implemented yet for greenflow
for item in inspect.getmembers(plugin_nodes):
if inspect.ismodule(item[1]):
# print('Greenflow builtin plugin: {}'.format(item))
labmod_pkg = 'greenflow.{}'.format(item[0])
all_nodes[labmod_pkg] = []
for node in inspect.getmembers(item[1]):
nodecls = node[1]
if not inspect.isclass(nodecls):
continue
if not issubclass(nodecls, Node):
continue
if nodecls in loaded_node_classes:
continue
task = {'id': 'node_'+str(uuid.uuid4()),
'type': node[0],
'conf': {},
'inputs': []}
task_inst = Task(task)
node_inst = get_node_tgraphmixin_instance(nodecls, task_inst)
nodeObj = init_get_labnode_obj(node_inst, False)
all_nodes[labmod_pkg].append(nodeObj)
loaded_node_classes.append(nodecls)
for module in all_modules:
module_file_or_path = Path(all_modules[module])
loaded = load_modules(all_modules[module], module)
mod = loaded.mod
modulename = module
# all_nodes[modulename] = []
for node in inspect.getmembers(mod):
nodecls = node[1]
if not inspect.isclass(nodecls):
continue
if nodecls == Node:
continue
if not issubclass(nodecls, Node):
continue
if nodecls in loaded_node_classes:
continue
task = {'id': 'node_'+str(uuid.uuid4()),
'type': node[0],
'conf': {},
'inputs': [],
'module': module
}
task_inst = Task(task)
node_inst = get_node_tgraphmixin_instance(nodecls, task_inst)
nodeObj = init_get_labnode_obj(node_inst, False)
if module_file_or_path.is_dir():
# submod = nodecls.__module__.split('.')[1:]
# flatten out the namespace hierarchy
submod = nodecls.__module__.split('.')[1:2]
modulename_ = '.'.join([modulename, '.'.join(submod)]) \
if submod else modulename
all_nodes.setdefault(modulename_, []).append(nodeObj)
else:
all_nodes.setdefault(modulename, []).append(nodeObj)
loaded_node_classes.append(nodecls)
for module in DYNAMIC_MODULES.keys():
modulename = module
all_nodes[modulename] = []
for class_name in DYNAMIC_MODULES[module].keys():
nodecls = DYNAMIC_MODULES[module][class_name]
if not issubclass(nodecls, Node):
continue
if nodecls in loaded_node_classes:
continue
task = {'id': 'node_'+str(uuid.uuid4()),
'type': nodecls.__name__,
'conf': {},
'inputs': [],
'module': module
}
task_inst = Task(task)
node_inst = get_node_tgraphmixin_instance(nodecls, task_inst)
nodeObj = init_get_labnode_obj(node_inst, False)
all_nodes.setdefault(modulename, []).append(nodeObj)
loaded_node_classes.append(nodecls)
# load all the plugins from entry points
for entry_point in importlib_metadata.entry_points().get(
'greenflow.plugin', ()):
mod = entry_point.load()
modulename = entry_point.name
for node in inspect.getmembers(mod):
nodecls = node[1]
if not inspect.isclass(nodecls):
continue
if nodecls == Node:
continue
if not issubclass(nodecls, Node):
continue
if nodecls in loaded_node_classes:
continue
task = {'id': 'node_'+str(uuid.uuid4()),
'type': node[0],
'conf': {},
'inputs': [],
'module': modulename
}
task_inst = Task(task)
node_inst = get_node_tgraphmixin_instance(nodecls, task_inst)
nodeObj = init_get_labnode_obj(node_inst, False)
all_nodes.setdefault(modulename, []).append(nodeObj)
loaded_node_classes.append(nodecls)
return all_nodes
| fsi-samples-main | greenflowlab/greenflowlab/server_utils.py |
#////////////////////////////////////////////////////////////////////////////
#//
#// Copyright (C) NVIDIA Corporation. All rights reserved.
#//
#// NVIDIA Sample Code
#//
#// Please refer to the NVIDIA end user license agreement (EULA) associated
#// with this source code for terms and conditions that govern your use of
#// this software. Any use, reproduction, disclosure, or distribution of
#// this software and related documentation outside the terms of the EULA
#// is strictly prohibited.
#//
#////////////////////////////////////////////////////////////////////////////
import csv
import os
import sys
if len(sys.argv)==1:
    # default data directory (the NYSE path is kept as an alternative)
    # dir = 'data/MVO3.2021.02/NYSE/'
    dir = 'data/MVO3.2021.02/NASDAQ/'
elif len(sys.argv)==2:
dir = sys.argv[1]
print(dir)
else: exit()
os.chdir(dir)
fileList = os.listdir(dir)
print('files:', fileList)
print('file count:', len(fileList))
prices = []; lab = []
for file in fileList:
print(file)
f = open(file, 'r')
x = [line.split('\n')[0] for line in f.readlines()]
if (file[0:6] == 'cached') and (x[1] != 'NA'):
l = list(map(float,x[1:]))
prices.append(l)
if file == 'cachedGME.csv': print(l)
lab.append(file[6:][:-4])
print('*****')
print(len(lab))
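# Transpose so each row holds one date's prices across all tickers
# (columns line up with the `lab` header row written below).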
prices = [list(x) for x in zip(*prices)]
D = len(prices)
print(D)
length = len(prices[0])
print(length)
outFile = open(dir+'/'+'prices.csv','w')
with outFile:
writer = csv.writer(outFile)
writer.writerow(lab)
writer.writerows(prices)
| fsi-samples-main | backtesting_equity_investment_strats/scripts/coalescePrices.py |
'''
'''
import pathlib
from setuptools import setup, find_packages
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / 'README.md').read_text(encoding='utf-8')
install_requires = ['dask[distributed]', 'dask[dataframe]', 'configparser',
'cloudpickle', 'PyYaml',
'jsonpath_ng', 'ruamel.yaml', 'pandas']
setup(
name='greenflow',
version='1.0.5',
description='greenflow - RAPIDS Financial Services Algorithms',
long_description=long_description,
long_description_content_type='text/markdown',
author='NVIDIA Corporation',
url='https://github.com/NVIDIA/fsi-samples/tree/main/greenflow',
packages=find_packages(include=['greenflow', 'greenflow.*']),
install_requires=install_requires,
license="Apache",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3',
'Operating System :: POSIX :: Linux',
],
entry_points={
'console_scripts': ['greenflow-flow=greenflow.flow:main'],
}
)
| fsi-samples-main | greenflow/setup.py |
from collections import namedtuple
from collections.abc import Mapping
__all__ = ['_namedtuple_with_defaults']
def _namedtuple_with_defaults(typename, field_names, default_values=()):
# https://stackoverflow.com/a/18348004/3457624
T = namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
if isinstance(default_values, Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
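# Illustrative usage (names are examples only):
#   Point = _namedtuple_with_defaults('Point', ['x', 'y'], {'y': 0})
#   Point(1)  # -> Point(x=1, y=0); unspecified fields default to None unless overridden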
| fsi-samples-main | greenflow/greenflow/_common.py |
from .dataframe_flow import TaskGraph
from .dataframe_flow.node import Node
from .dataframe_flow import PortsSpecSchema
__all__ = ["TaskGraph", "Node", "PortsSpecSchema"]
| fsi-samples-main | greenflow/greenflow/__init__.py |
from greenflow.dataframe_flow import TaskGraph
import argparse
def main():
parser = argparse.ArgumentParser(
description='Evaluate the dataframe flow graph')
parser.add_argument('-t', '--task', help="the yaml task file")
parser.add_argument('output', help="the output nodes", nargs='+')
args = parser.parse_args()
    # import pudb
    # pudb.set_trace()
task_graph = TaskGraph.load_taskgraph(args.task)
print('output nodes:', args.output)
task_graph.run(args.output)
if __name__ == "__main__":
main()
| fsi-samples-main | greenflow/greenflow/flow.py |
import copy
from .taskSpecSchema import TaskSpecSchema
module_cache = {}
__all__ = ['Task']
class Task(object):
    ''' A strongly typed Task class constructed from a dictionary.
'''
def __init__(self, task_spec):
self._task_spec = {} # internal dict
# whatever is passed in has to be valid
TaskSpecSchema.validate(task_spec)
self._task_spec = copy.copy(task_spec)
# deepcopies of inputs can still be done
self._task_spec[TaskSpecSchema.inputs] = \
copy.deepcopy(task_spec[TaskSpecSchema.inputs])
def __getitem__(self, key):
return self._task_spec[key]
def get(self, key, default=None):
return self._task_spec.get(key, default)
if __name__ == "__main__":
t = {'id': 'test',
'type': "DropNode",
'conf': {},
'inputs': ["node_other"]}
task = Task(t)
| fsi-samples-main | greenflow/greenflow/dataframe_flow/task.py |
import re
import importlib
from .portsSpecSchema import (PortsSpecSchema, NodePorts)
from .metaSpec import (MetaDataSchema, MetaData)
__all__ = ['NodeExtensionMixin']
TYPES_CACHE = {}
class NodeExtensionMixin:
def _sep_variable(self, variable):
assert isinstance(variable, str)
e = re.search('^\${(.*):(.*)}$', variable) # noqa
if e is None and variable.startswith('$'):
raise ValueError("varaible format is wrong")
if e is None:
return None
groups = e.groups()
return groups
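    # Illustrative behaviour of _sep_variable:
    #   '${conf:some_key}'  -> ('conf', 'some_key')
    #   'plain_string'      -> None (no substitution requested)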
def _parse_variable(self, variable, port_inports):
if isinstance(variable, int):
return variable
if isinstance(variable, dict):
return variable
if variable is None:
return None
port_type = PortsSpecSchema.port_type
groups = self._sep_variable(variable)
if groups is None:
return variable
if groups[0] == 'conf':
return self.conf[groups[1]]
elif groups[0] == 'port':
return port_inports[groups[1]][port_type]
else:
raise KeyError('Cannot parse variable {}'.format(groups))
def _load_type(self, type_str):
return_list = False
if isinstance(type_str, list):
return_list = True
type_str_list = type_str
else:
type_str_list = [type_str]
clsobj_list = []
for type_str in type_str_list:
if type_str in TYPES_CACHE:
clsobj_list.append(TYPES_CACHE[type_str])
if isinstance(type_str, type):
clsobj = type_str
elif isinstance(type_str, str):
splits = type_str.split('.')
mod_str = ".".join(splits[:-1])
mod = importlib.import_module(mod_str)
clsobj = getattr(mod, splits[-1])
TYPES_CACHE[type_str] = clsobj
else:
raise Exception('Cannot load type: {}'.format(type_str))
clsobj_list.append(clsobj)
if return_list:
return clsobj_list
else:
return clsobj_list[0]
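    # Illustrative behaviour of _load_type (module path is an example only):
    #   self._load_type('builtins.dict')  -> <class 'dict'>
    # Passing a list of strings returns the corresponding list of classes.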
def _resolve_ports(self, ports_template):
'''The ports can be defined via template specification.
Example:
port_inports = {
"port0_name": {
PortsSpecSchema.port_type: ["type0", "type1"]
},
"port1_name": {
PortsSpecSchema.port_type: "${conf:some_type}",
PortsSpecSchema.dynamic: {
                    # choice can be True/False, a list of types, or a string
                    # True: generate outports matching the connected dynamic
                    #   input ports, using the same type as the dynamic port
                    # False: do not generate matching outports
                    # list of types or string: same as True, but use the
                    #   specified types
PortsSpecSchema.DYN_MATCH: ['type0', 'type1']
}
},
...
}
port_outports = {
"port0_name": {
PortsSpecSchema.port_type: ["type0", "type1"]
},
"port1_name": {
PortsSpecSchema.port_type: "${port:port0_name}"
},
...
}
ports_template = NodePorts(inports=port_inports,
outports=port_outports)
ports_resolved = self._resolve_ports(ports_template)
Above, the types are specified as strings and loaded dynamically.
Additionally an input port can use "dynamic" syntax for automatically
resolving types for the input connections to that port. The output
        ports can similarly define types as strings to be loaded dynamically,
        and make references to port inputs to re-use an input port's types.
        After calling _resolve_ports the port definitions would look something
        like the following:
ports_resolved.inports == {
"port0_name": {
PortsSpecSchema.port_type: [type0, type1]
},
"port1_name": {
PortsSpecSchema.port_type: "${conf:some_type}",
PortsSpecSchema.dynamic: {
PortsSpecSchema.DYN_MATCH: [type0, type1]
}
},
...
}
        ports_resolved.outports == {
"port0_name": {
PortsSpecSchema.port_type: [type0, type1]
},
"port1_name": {
PortsSpecSchema.port_type: "${port:port0_name}"
},
...
}
Port types using "$" syntax are resolved when the node is within a
taskgraph context. This additional resolve logic is handled in
:class:`NodeTaskGraphExtensionMixin.port_setup_ext`.
:param ports_template: Ports definition via convenience templating.
:type ports_template: NodePorts
:returns: Resolved ports.
:rtype: NodePorts
'''
ports = ports_template
dy = PortsSpecSchema.dynamic
port_type = PortsSpecSchema.port_type
# resolve all the variables
port_inports = {}
inports = ports.inports
for key in inports:
key_name = self._parse_variable(key, inports)
value = inports[key]
ptype = value[port_type]
return_list = False
if isinstance(ptype, list):
return_list = True
ptype_list = ptype
else:
ptype_list = [ptype]
loaded_types = [
self._load_type(self._parse_variable(item, inports))
if not isinstance(item, type) else item
for item in ptype_list
]
if return_list:
value[port_type] = loaded_types
else:
value[port_type] = loaded_types[0]
if dy in value:
dynamic_value = value[dy]
m_outputs = dynamic_value[PortsSpecSchema.DYN_MATCH]
if isinstance(m_outputs, bool):
pass
elif isinstance(m_outputs, list):
dynamic_value[PortsSpecSchema.DYN_MATCH] = [
self._load_type(self._parse_variable(item, inports))
if not isinstance(item, type) else item
for item in m_outputs
]
elif isinstance(m_outputs, str):
dynamic_value[PortsSpecSchema.DYN_MATCH] = self._load_type(
self._parse_variable(m_outputs, inports))
else:
raise ValueError
port_inports[key_name] = value
# resolve all the variables
port_outports = {}
outports = ports.outports
for key in outports:
key_name = self._parse_variable(key, port_inports)
value = outports[key]
if isinstance(value[port_type], list):
value[port_type] = [
self._load_type(self._parse_variable(item, port_inports))
if not isinstance(item, type) else item
for item in value[port_type]
]
elif isinstance(value[port_type], str):
# This part is valid if node is part of NodeTaskGraphMixin
if not value[port_type].startswith('$'):
value[port_type] = self._load_type(
self._parse_variable(value[port_type],
port_inports))
else:
# it will be resolved inside the port_setup_ext
pass
elif isinstance(value[port_type], type):
pass
else:
raise ValueError
port_outports[key_name] = value
return NodePorts(inports=port_inports, outports=port_outports)
def _resolve_meta(self, meta_template, port_inports):
meta = meta_template
meta_inports = {}
metainports = meta.inports
for key in metainports:
key_name = self._parse_variable(key, port_inports)
value = metainports[key]
new_value = {}
for vk in value:
nvk = self._parse_variable(vk, port_inports)
new_value[nvk] = self._parse_variable(value[vk],
port_inports)
meta_inports[key_name] = new_value
meta_outports = {}
metaoutports = meta.outports
data_accessor = MetaDataSchema.META_DATA
order_accessor = MetaDataSchema.META_ORDER
for key in metaoutports:
meta_outports[key] = metaoutports[key].copy()
key_name = self._parse_variable(key, port_inports)
value = metaoutports[key]
if data_accessor in value:
new_data = {}
for vk in value[data_accessor]:
nvk = self._parse_variable(vk, port_inports)
new_data[nvk] = self._parse_variable(
value[data_accessor][vk], port_inports)
meta_outports[key_name][data_accessor] = new_data
if order_accessor in value:
new_order = {}
for vk in value[order_accessor]:
nvk = self._parse_variable(vk, port_inports)
new_order[nvk] = value[order_accessor][vk]
meta_outports[key_name][order_accessor] = new_order
return MetaData(inports=meta_inports, outports=meta_outports)
| fsi-samples-main | greenflow/greenflow/dataframe_flow/_node_extension_mixin.py |
import abc
__all__ = ['_Node']
# compatible with Python 2 *and* 3:
_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
class _Node(_ABC):
'''Intermediate class to identify Node class instances and avoid cyclic
imports.'''
| fsi-samples-main | greenflow/greenflow/dataframe_flow/_node.py |
import os
import cloudpickle
import base64
import pathlib
def get_file_path(path: str) -> str:
"""
@path: the relative or absolute file path
returns: absolute file path
"""
if path.startswith('/'):
return path
if 'GREENFLOWROOT' in os.environ:
ROOT = pathlib.Path(os.environ['GREENFLOWROOT'])
else:
ROOT = pathlib.Path(os.getcwd())
if os.path.exists(path):
return path
path = pathlib.Path(path)
if (ROOT/path).absolute().parent.exists():
return str(ROOT/path)
else:
print('current path', os.getcwd())
print('input path', path)
print('cannot find the file')
raise FileNotFoundError("File path cannnot be found")
def get_encoded_class(classObj):
pickled = cloudpickle.dumps(classObj)
encoding = base64.b64encode(pickled).decode()
return encoding
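# The encoding round-trips with cloudpickle/base64, e.g. (illustrative,
# `SomeClass` is a placeholder):
#   cls = cloudpickle.loads(base64.b64decode(get_encoded_class(SomeClass)))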
| fsi-samples-main | greenflow/greenflow/dataframe_flow/util.py |