python_code | repo_name | file_path |
---|---|---|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
"""EmotionNet visualization util scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import errno
import os
import numpy as np
import json
import argparse
def mkdir_p(new_path):
"""Makedir, making also non-existing parent dirs.
Args:
new_path (str): path to the directory to be created
"""
try:
print(new_path)
os.makedirs(new_path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(new_path):
pass
else:
raise
def check_dataset_structure(data_path):
"""Check the dataset structure.
Args:
data_path (str): path to the dataset
"""
ret = True
if not os.path.isdir(data_path):
print("Dataset does not exist.")
ret = False
image_path = os.path.join(data_path, 'cohn-kanade-images')
emotion_label_path = os.path.join(data_path, 'Emotion')
landmarks_label_path = os.path.join(data_path, 'Landmarks')
if not os.path.isdir(image_path):
print("Image data path {} does not exist.".format(image_path))
ret = False
if not os.path.isdir(emotion_label_path):
print("Emotion label path {} does not exist.".format(emotion_label_path))
ret = False
if not os.path.isdir(landmarks_label_path):
print("Landmarks label path {} does not exist.".format(landmarks_label_path))
ret = False
return ret, image_path, emotion_label_path, landmarks_label_path
def extract_face_bbox(landmarks_2D):
"""Extract face bounding box from 2D bounding box.
Args:
landmarks_2D (array): 2D landmarks array
"""
data_x = landmarks_2D[:, 0]
data_y = landmarks_2D[:, 1]
x_min = min(data_x)
y_min = min(data_y)
x_max = max(data_x)
y_max = max(data_y)
x1 = x_min
y1 = y_min
x2 = x_max
y2 = y_max
return list(map(int, [x1, y1, x2-x1, y2-y1]))
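# Illustrative example (not part of the original script): for landmark points
# [[10., 20.], [110., 90.]] this returns [10, 20, 100, 70], i.e. the tight
# [x, y, width, height] box around all points.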
def isEmpty(path):
"""Determine if a directory is empty.
Args:
path (str): path to the directory
"""
isEmpty = False
if os.path.exists(path) and not os.path.isfile(path):
# Checking if the directory is empty or not
if not os.listdir(path):
isEmpty = True
else:
isEmpty = False
else:
isEmpty = True
return isEmpty
def read_landmarks_data(landmarks_file_path):
"""Read landmarks data.
Args:
landmarks_file_path (str): input landmarks path.
"""
landmarks_data = []
with open(landmarks_file_path, 'r') as f:
contents = f.readlines()
for j in range(len(contents)):
content = contents[j].rstrip('\n')
content = content.split(' ')
for k in range(len(content)):
if(content[k]!='' and content[k]!='\n'):
landmarks_data.append(float(content[k]))
landmarks_data = np.array(landmarks_data, dtype=np.float32)
landmarks_data = landmarks_data.astype(np.longdouble)
landmarks_2D = landmarks_data.reshape(-1, 2)
return landmarks_2D
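# Note: each line of a CK+ "*_landmarks.txt" file is expected to hold one
# whitespace-separated "x y" pair (possibly with leading spaces), so the parser
# above returns an (N, 2) float array of landmark coordinates.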
def parse_args(args=None):
"""parse the arguments.
Args:
args (list): input argument
"""
parser = argparse.ArgumentParser(description='Transfer CK+ dataset for EmotionNet')
parser.add_argument(
"--root_path",
type=str,
required=True,
help="Root path to the testing dataset"
)
parser.add_argument(
"--dataset_folder_name",
type=str,
required=True,
help="CK+ dataset folder name"
)
parser.add_argument(
"--container_root_path",
type=str,
required=True,
help="Root path to the testing dataset inside container"
)
return parser.parse_args(args)
def main(args=None):
"""Main function to parse CK+ public dataset.
Args:
args (list): input argument
"""
args = parse_args(args)
root_path = args.root_path
dataset_folder_name = args.dataset_folder_name
container_root_path = args.container_root_path
data_path = os.path.join(root_path, 'orgData', dataset_folder_name)
container_data_path = os.path.join(container_root_path, 'orgData', dataset_folder_name)
output_path = os.path.join(root_path, 'postData', dataset_folder_name)
ret, image_dir, emotion_label_dir, landmarks_label_dir = check_dataset_structure(data_path)
if not ret:
raise Exception("CK+ dataset does not match expected structure.")
# create path for json labels:
json_result_path = os.path.join(data_path, 'data_factory', 'fiducial')
mkdir_p(json_result_path)
emotion_map_ckplus = {0: 'neutral', 1: 'angry', 2: 'contempt', 3: 'disgust', 4: 'fear', 5: 'happy', 6: 'sad', 7: 'surprise'}
neutral_image_percentage = 0.1
emotion_image_percentage = 0.9
user_list = os.listdir(image_dir)
for k in range(0, len(user_list)):
user_name = user_list[k]
user_png_path = os.path.join(image_dir, user_name)
sequence_png_list = os.listdir(user_png_path)
for seq in sequence_png_list:
seq_png_path = os.path.join(user_png_path, seq)
seq_landmarks_path = os.path.join(landmarks_label_dir, user_name, seq)
seq_emotion_path = os.path.join(emotion_label_dir, user_name, seq)
if isEmpty(seq_emotion_path) or isEmpty(seq_landmarks_path) or\
isEmpty(seq_png_path):
continue
else:
label_file_list = os.listdir(seq_emotion_path)
# For CK+, only one emotion label text file exists in each sequence folder
assert len(label_file_list) == 1
emotion_label_path = os.path.join(seq_emotion_path, label_file_list[0])
f = open(emotion_label_path, 'r')
emotion_label = int(float(f.read()))
f.close()
emotion_name_ckplus = emotion_map_ckplus[emotion_label]
# get image file
image_file_list = os.listdir(seq_png_path)
if '.DS_Store' in image_file_list:
image_file_list.remove('.DS_Store')
image_num_all = len(image_file_list)
image_num_neutral = max(1, int(image_num_all * neutral_image_percentage))
image_num_curr_emotion = max(1, int(image_num_all * emotion_image_percentage))
neutral_list_prefix = []
curr_emotion_prefix = []
for i in range(1, image_num_all + 1):
frame_id = str(i).zfill(8)
file_prefix = user_name + '_' + seq + '_' + frame_id
if i <= image_num_neutral:
neutral_list_prefix.append(file_prefix)
elif i > image_num_curr_emotion:
curr_emotion_prefix.append(file_prefix)
else:
continue
ret = False
for file_prefix in neutral_list_prefix:
emotion_name = 'neutral'
ret = setup_frame_dict(file_prefix, seq_png_path, seq_landmarks_path,
emotion_label, emotion_name, user_name,
json_result_path, data_path, container_data_path, False)
for file_prefix in curr_emotion_prefix:
ret = setup_frame_dict(file_prefix, seq_png_path, seq_landmarks_path,
emotion_label, emotion_name_ckplus, user_name,
json_result_path, data_path, container_data_path, False)
if not ret:
continue
def setup_frame_dict(file_prefix, image_path, landmarks_path, emotion_label,
emotion_class_name, user_name, json_result_path,
data_path, container_data_path, debug=False):
"""Set up frame dictionary.
Args:
file_prefix (str): prefix for the file
image_path (str): path to the image
landmarks_path (str): path to the landmarks
emotion_label (int): emotion label id of the provided image
emotion_class_name (str): emotion class name of the provided image
user_name (str): user name
json_result_path (str): json result path
data_path (str): path to the dataset root on the host
container_data_path (str): path to the dataset root inside the container
debug (bool): debug flag
"""
image_file_name = file_prefix + '.png'
landmarks_file_name = file_prefix + '_landmarks.txt'
frame_path = os.path.join(image_path, image_file_name)
image_frame = cv2.imread(frame_path)
assert image_frame.shape[0] > 0 and image_frame.shape[1] > 0
# read landmarks file and process/normalize the landmarks
landmarks_file_path = os.path.join(landmarks_path, landmarks_file_name)
landmarks_2D = read_landmarks_data(landmarks_file_path)
num_landmarks = landmarks_2D.shape[0]
facebbox = extract_face_bbox(landmarks_2D)
main_label_json = []
label_json = {}
label_json['class'] = 'image'
path_info = frame_path.split(data_path)
container_frame_path = container_data_path + path_info[-1]
label_json['filename'] = container_frame_path
label_annotations = []
facebbox_dict = {}
landmarks_dict = {}
# set face bounding box dictionary
facebbox_dict['class'] = "FaceBbox"
landmarks_dict["tool-version"] = "1.0"
facebbox_dict['face_tight_bboxx'] = str(facebbox[0])
facebbox_dict['face_tight_bboxy'] = str(facebbox[1])
facebbox_dict['face_tight_bboxwidth'] = str(facebbox[2])
facebbox_dict['face_tight_bboxheight'] = str(facebbox[3])
# set landmarks dictionary
landmarks_dict['class'] = "FiducialPoints"
landmarks_dict['tool-version'] = "1.0"
for k in range(0, num_landmarks):
pt_x_name = 'P' + str(k + 1) + 'x'
pt_y_name = 'P' + str(k + 1) + 'y'
landmarks_dict[pt_x_name] = float(landmarks_2D[k][0])
landmarks_dict[pt_y_name] = float(landmarks_2D[k][1])
label_annotations.append(facebbox_dict)
label_annotations.append(landmarks_dict)
label_json['annotations'] = label_annotations
main_label_json.append(label_json)
json_file_name = os.path.join(json_result_path, file_prefix + '_' + emotion_class_name + '.json')
print("Generate json: ", json_file_name)
with open(json_file_name, "w") as label_file:
json.dump(main_label_json, label_file, indent=4)
return True
if __name__ == "__main__":
main()
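# Example invocation (paths are illustrative, not from the original script):
#   python ckplus_convert.py \
#       --root_path /workspace/tao-experiments/emotionnet \
#       --dataset_folder_name ckplus \
#       --container_root_path /workspace/tao-experiments/emotionnet
# The script expects <root_path>/orgData/<dataset_folder_name> to contain the
# 'cohn-kanade-images', 'Emotion' and 'Landmarks' folders checked above.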
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/emotionnet/ckplus_convert.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT YOLOv4 example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/yolo_v4/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Script to prepare train/val dataset for Unet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
from PIL import Image, ImageSequence
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for UNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to ISBI Tiff Files"
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to processes images from ISBI Tiff files."
)
return parser.parse_args(args)
def save_arrays_as_images(arr, im_dir):
"""Utility function to save the images to dir from arrays."""
for idx, arr in enumerate(arr):
img_name = os.path.join(im_dir, "image_{}.png".format(idx))
cv2.imwrite(img_name, arr)
def load_multipage_tiff(path):
"""Load tiff images containing many images in the channel dimension"""
return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])
def check_and_create(d):
"""Utility function to create a dir if not present"""
if not os.path.isdir(d):
os.makedirs(d)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
train_images_tif = os.path.join(args.input_dir, "train-volume.tif")
train_masks_tif = os.path.join(args.input_dir, "train-labels.tif")
test_images_tif = os.path.join(args.input_dir, "test-volume.tif")
output_images_dir = os.path.join(args.output_dir,"images")
output_masks_dir = os.path.join(args.output_dir,"masks")
# Creating the images dir for train, test, val
train_images_dir = os.path.join(output_images_dir,"train")
val_images_dir = os.path.join(output_images_dir,"val")
test_images_dir = os.path.join(output_images_dir,"test")
train_masks_dir = os.path.join(output_masks_dir,"train")
val_masks_dir = os.path.join(output_masks_dir,"val")
check_and_create(train_images_dir)
check_and_create(val_images_dir)
check_and_create(test_images_dir)
check_and_create(train_masks_dir)
check_and_create(val_masks_dir)
train_np_arrays_images = load_multipage_tiff(train_images_tif)
train_np_arrays_masks = load_multipage_tiff(train_masks_tif)
test_np_arrays_images = load_multipage_tiff(test_images_tif)
# Splitting the train numpy arrays into train and val
train_np_arrays_images_final = train_np_arrays_images[:20,:,:]
train_np_arrays_masks_final = train_np_arrays_masks[:20,:,:]
val_np_arrays_images_final = train_np_arrays_images[20:,:,:]
val_np_arrays_masks_final = train_np_arrays_masks[20:,:,:]
# Saving the train arrays as images
save_arrays_as_images(train_np_arrays_images_final, train_images_dir)
save_arrays_as_images(train_np_arrays_masks_final, train_masks_dir)
# Saving the val arrays as images
save_arrays_as_images(val_np_arrays_images_final, val_images_dir)
save_arrays_as_images(val_np_arrays_masks_final, val_masks_dir)
# Saving the test arrays as images
save_arrays_as_images(test_np_arrays_images, test_images_dir)
print("Prepared data successfully !")
if __name__ == "__main__":
main()
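# Example invocation (paths are illustrative): the input directory must hold the
# ISBI files train-volume.tif, train-labels.tif and test-volume.tif.
#   python prepare_data_isbi.py --input_dir /data/isbi --output_dir /data/isbi/processed
# The first 20 TIFF slices become the train split and the rest the val split.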
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/unet/tao_isbi/prepare_data_isbi.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Script to visualize the Ground truth masks overlay for Unet tutorial."""
import os
import random
import argparse
import cv2
import numpy as np
def get_color_id(num_classes):
"""Function to return a list of color values for each class."""
colors = []
for idx in range(num_classes):
random.seed(idx)
colors.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))
return colors
def overlay_seg_image(inp_img, seg_img):
"""The utility function to overlay mask on original image."""
original_h = inp_img.shape[0]
original_w = inp_img.shape[1]
seg_img = cv2.resize(seg_img, (original_w, original_h))
overlayed_img = (inp_img/2 + seg_img/2).astype('uint8')
return overlayed_img
def visualize_masks(img_dir, mask_dir, vis_dir, num_imgs=None, num_classes=2):
"""The function to visualize the segmentation masks.
Args:
img_dir: Directory of images.
mask_dir: Mask images annotations.
vis_dir: The output dir to save visualized images.
num_classes: number of classes.
num_imgs: number of images to visualize.
"""
# Create the visualization dir
if not os.path.isdir(vis_dir):
os.makedirs(vis_dir)
colors = get_color_id(num_classes)
img_names = os.listdir(img_dir)
if not num_imgs:
num_imgs = len(img_names)
mask_sample_name = os.listdir(mask_dir)[0]
mask_ext = mask_sample_name.split(".")[-1]
for img_name in img_names[:num_imgs]:
img_path = os.path.join(img_dir, img_name)
orig_image = cv2.imread(img_path)
output_height = orig_image.shape[0]
output_width = orig_image.shape[1]
segmented_img = np.zeros((output_height, output_width, 3))
pred = cv2.imread(os.path.join(mask_dir, img_name.split(".")[0]+"."+mask_ext),0)
for c in range(len(colors)):
seg_arr_c = pred[:, :] == c
segmented_img[:, :, 0] += ((seg_arr_c)*(colors[c][0])).astype('uint8')
segmented_img[:, :, 1] += ((seg_arr_c)*(colors[c][1])).astype('uint8')
segmented_img[:, :, 2] += ((seg_arr_c)*(colors[c][2])).astype('uint8')
fused_img = overlay_seg_image(orig_image, segmented_img)
cv2.imwrite(os.path.join(vis_dir, img_name), fused_img)
def build_command_line_parser():
"""
Parse command-line flags passed to the visualization script.
Returns:
Namespace with all parsed arguments.
"""
parser = argparse.ArgumentParser(
prog='Visualize Segmentation.', description='Overlay Segmentation.')
parser.add_argument(
'-i',
'--imgs_dir',
type=str,
default=None,
help='Path to folder where images are saved.'
)
parser.add_argument(
'-m',
'--masks_dir',
type=str,
default=None,
help='Path to a folder where mask images are saved.'
)
parser.add_argument(
'-o',
'--vis_dir',
type=str,
default=None,
help='Path to a folder where the segmentation overlayed images are saved.'
)
parser.add_argument(
'--num_classes',
type=int,
default=None,
help='Number of classes.'
)
parser.add_argument(
'--num_images',
type=int,
default=None,
help='Number of images to visualize.'
)
return parser
def parse_command_line_args():
"""Parser command line arguments to the trainer.
Returns:
args: Parsed arguments using argparse.
"""
parser = build_command_line_parser()
args = parser.parse_args()
return args
def main():
args = parse_command_line_args()
visualize_masks(args.imgs_dir, args.masks_dir, args.vis_dir, args.num_images, args.num_classes)
if __name__ == '__main__':
main()
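# Example invocation (paths and values are illustrative):
#   python vis_annotation_isbi.py -i images/val -m masks/val -o vis/val \
#       --num_classes 2 --num_images 10
# Each mask pixel value is treated as a class id and blended 50/50 with the
# original image.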
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/unet/tao_isbi/vis_annotation_isbi.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT YOLO example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/yolo_v3/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Script to prepare train/val dataset for Unet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
from PIL import Image, ImageSequence
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for UNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to ISBI Tiff Files"
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to processes images from ISBI Tiff files."
)
return parser.parse_args(args)
def save_arrays_as_images(arr, im_dir):
"""Utility function to save the images to dir from arrays."""
for idx, arr in enumerate(arr):
img_name = os.path.join(im_dir, "image_{}.png".format(idx))
cv2.imwrite(img_name, arr)
def load_multipage_tiff(path):
"""Load tiff images containing many images in the channel dimension"""
return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])
def check_and_create(d):
"""Utility function to create a dir if not present"""
if not os.path.isdir(d):
os.makedirs(d)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
print("Preparing Dataset Succesfully.")
train_images_tif = os.path.join(args.input_dir, "train-volume.tif")
train_masks_tif = os.path.join(args.input_dir, "train-labels.tif")
test_images_tif = os.path.join(args.input_dir, "test-volume.tif")
output_images_dir = os.path.join(args.output_dir,"images")
output_masks_dir = os.path.join(args.output_dir,"masks")
# Creating the images dir for train, test, val
train_images_dir = os.path.join(output_images_dir,"train")
val_images_dir = os.path.join(output_images_dir,"val")
test_images_dir = os.path.join(output_images_dir,"test")
train_masks_dir = os.path.join(output_masks_dir,"train")
val_masks_dir = os.path.join(output_masks_dir,"val")
check_and_create(train_images_dir)
check_and_create(val_images_dir)
check_and_create(test_images_dir)
check_and_create(train_masks_dir)
check_and_create(val_masks_dir)
train_np_arrays_images = load_multipage_tiff(train_images_tif)
train_np_arrays_masks = load_multipage_tiff(train_masks_tif)
test_np_arrays_images = load_multipage_tiff(test_images_tif)
# Splitting the train numpy arrays into train and val
train_np_arrays_images_final = train_np_arrays_images[:20,:,:]
train_np_arrays_masks_final = train_np_arrays_masks[:20,:,:]
val_np_arrays_images_final = train_np_arrays_images[20:,:,:]
val_np_arrays_masks_final = train_np_arrays_masks[20:,:,:]
# Saving the train arrays as images
save_arrays_as_images(train_np_arrays_images_final, train_images_dir)
save_arrays_as_images(train_np_arrays_masks_final, train_masks_dir)
# Saving the val arrays as images
save_arrays_as_images(val_np_arrays_images_final, val_images_dir)
save_arrays_as_images(val_np_arrays_masks_final, val_masks_dir)
# Saving the test arrays as images
save_arrays_as_images(test_np_arrays_images, test_images_dir)
print("Prepared data successfully !")
if __name__ == "__main__":
main() | tao_tutorials-main | notebooks/tao_launcher_starter_kit/segformer/prepare_data_isbi.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Script to visualize the Ground truth masks overlay for Unet tutorial."""
import os
import random
import argparse
import cv2
import numpy as np
def get_color_id(num_classes):
"""Function to return a list of color values for each class."""
colors = []
for idx in range(num_classes):
random.seed(idx)
colors.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))
return colors
def overlay_seg_image(inp_img, seg_img):
"""The utility function to overlay mask on original image."""
original_h = inp_img.shape[0]
original_w = inp_img.shape[1]
seg_img = cv2.resize(seg_img, (original_w, original_h))
overlayed_img = (inp_img/2 + seg_img/2).astype('uint8')
return overlayed_img
def visualize_masks(img_dir, mask_dir, vis_dir, num_imgs=None, num_classes=2):
"""The function to visualize the segmentation masks.
Args:
img_dir: Directory of images.
mask_dir: Mask images annotations.
vis_dir: The output dir to save visualized images.
num_classes: number of classes.
num_imgs: number of images to visualize.
"""
# Create the visualization dir
if not os.path.isdir(vis_dir):
os.makedirs(vis_dir)
colors = get_color_id(num_classes)
img_names = os.listdir(img_dir)
if not num_imgs:
num_imgs = len(img_names)
mask_sample_name = os.listdir(mask_dir)[0]
mask_ext = mask_sample_name.split(".")[-1]
for img_name in img_names[:num_imgs]:
img_path = os.path.join(img_dir, img_name)
orig_image = cv2.imread(img_path)
output_height = orig_image.shape[0]
output_width = orig_image.shape[1]
segmented_img = np.zeros((output_height, output_width, 3))
pred = cv2.imread(os.path.join(mask_dir, img_name.split(".")[0]+"."+mask_ext),0)
for c in range(len(colors)):
seg_arr_c = pred[:, :] == c
segmented_img[:, :, 0] += ((seg_arr_c)*(colors[c][0])).astype('uint8')
segmented_img[:, :, 1] += ((seg_arr_c)*(colors[c][1])).astype('uint8')
segmented_img[:, :, 2] += ((seg_arr_c)*(colors[c][2])).astype('uint8')
fused_img = overlay_seg_image(orig_image, segmented_img)
cv2.imwrite(os.path.join(vis_dir, img_name), fused_img)
def build_command_line_parser():
"""
Parse command-line flags passed to the visualization script.
Returns:
Namespace with all parsed arguments.
"""
parser = argparse.ArgumentParser(
prog='Visualize Segmentation.', description='Overlay Segmentation.')
parser.add_argument(
'-i',
'--imgs_dir',
type=str,
default=None,
help='Path to folder where images are saved.'
)
parser.add_argument(
'-m',
'--masks_dir',
type=str,
default=None,
help='Path to a folder where mask images are saved.'
)
parser.add_argument(
'-o',
'--vis_dir',
type=str,
default=None,
help='Path to a folder where the segmentation overlayed images are saved.'
)
parser.add_argument(
'--num_classes',
type=int,
default=None,
help='Number of classes.'
)
parser.add_argument(
'--num_images',
type=int,
default=None,
help='Number of images to visualize.'
)
return parser
def parse_command_line_args():
"""Parser command line arguments to the trainer.
Returns:
args: Parsed arguments using argparse.
"""
parser = build_command_line_parser()
args = parser.parse_args()
return args
def main():
args = parse_command_line_args()
visualize_masks(args.imgs_dir, args.masks_dir, args.vis_dir, args.num_images, args.num_classes)
if __name__ == '__main__':
main()
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/segformer/vis_annotation_isbi.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT RetinaNet example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/retinanet/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Script to transform Wider face dataset to kitti format for Facenet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
def letterbox_image(image, target_size):
"""Resize image preserving aspect ratio using padding.
Args:
image (numpy.ndarray): Input image to be resized
target_size (tuple): Target image dimensions in (H,W,C) format.
Return:
new_image (numpy.ndarray): Output Image post resize.
scale (float): Scale factor of resize.
dx (int): Padding along x dimension to maintain aspect ratio.
dy (int): Padding along y dimension to maintain aspect ratio.
"""
iw, ih = image.shape[0:2][::-1]
w, h = target_size[1], target_size[0]
scale = min(float(w)/float(iw), float(h)/float(ih))
nw = int(iw*scale)
nh = int(ih*scale)
image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_CUBIC)
new_image = np.zeros(target_size, dtype=np.uint8)
dx = (w-nw)//2
dy = (h-nh)//2
new_image[dy:dy+nh, dx:dx+nw, :] = image
return new_image, scale, dx, dy
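# Worked example (illustrative): for a 200x100 (WxH) input and a (416, 416, 3)
# target, scale = min(416/200, 416/100) = 2.08, so the image is resized to
# 416x208 and centered with dx = 0, dy = 104 rows of zero padding.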
def adjust_box_coords(x1, y1, x2, y2, scale, dx, dy, image_height, image_width):
"""Adjust bounding box coordinates based on resize.
Args:
x1 (int): Top left x-coordinate of bounding box before resize.
y1 (int): Top left y-coordinate of bounding box before resize.
x2 (int): Bottom right x-coordinate of bounding box before resize.
y2 (int): Bottom right y-coordinate of bounding box before resize.
scale (float): Scale factor of resize.
dx (int): Padding along x dimension to maintain aspect ratio.
dy (int): Padding along y dimension to maintain aspect ratio.
image_height (int): Height of resized image.
image_width (int): Width of resized image.
Return:
x1 (int): Top left x-coordinate of bounding box after resize.
y1 (int): Top left y-coordinate of bounding box after resize.
x2 (int): Bottom right x-coordinate of bounding box after resize.
y2 (int): Bottom right y-coordinate of bounding box after resize.
"""
x1 = (int(dx + x1*scale))
x1 = min(max(x1, 0), image_width)
y1 = (int(dy + y1*scale))
y1 = min(max(y1, 0), image_height)
x2 = (int(dx + x2*scale))
x2 = min(max(x2, 0), image_width)
y2 = (int(dy + y2*scale))
y2 = min(max(y2, 0), image_height)
return x1, y1, x2, y2
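# Continuing the example above (illustrative): a box (x1=10, y1=20, x2=60, y2=70)
# in the original 200x100 image maps to (20, 145, 124, 249) in the 416x416
# letterboxed image, after scaling by 2.08, shifting by (dx=0, dy=104) and
# clamping to the image bounds.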
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Transform Wider dataset for Facenet tutorial')
parser.add_argument(
"--input_image_dir",
type=str,
required=True,
help="Input directory to Wider dataset images."
)
parser.add_argument(
"--input_label_file",
type=str,
required=True,
help="Input path to Wider dataset labels."
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to TLT dataset."
)
parser.add_argument(
"--image_height",
type=int,
required=True,
help="Height of output image."
)
parser.add_argument(
"--image_width",
type=int,
required=True,
help="Width of output image."
)
parser.add_argument(
"--grayscale",
required=False,
action='store_true',
help='Convert images to grayscale.'
)
return parser.parse_args(args)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
target_img_path = os.path.join(args.output_dir, "images")
target_label_path = os.path.join(args.output_dir, "labels")
target_size = (args.image_height, args.image_width, 3)
if not os.path.exists(target_img_path):
os.makedirs(target_img_path)
else:
print("This script will not run as output image path already exists.")
return
if not os.path.exists(target_label_path):
os.makedirs(target_label_path)
else:
print("This script will not run as output label path already exists.")
return
# read wider ground truth file
fd_gt_file = os.path.join(args.input_label_file)
f = open(fd_gt_file, 'r')
fd_gt = f.readlines()
f.close()
total_cnt = 0
i = 0
image_name = None
while i < len(fd_gt):
line = fd_gt[i].strip()
if "jpg" in line:
# start of new image
total_cnt += 1
image_name = line
image_prefix = image_name.split("/")[-1].split(".")[0]
image_path = os.path.join(args.input_image_dir, line)
if not os.path.exists(image_path):
print("Error reading image, Please check data")
return
# Transform Image
img = cv2.imread(image_path)
new_image, scale, dx, dy = letterbox_image(img, target_size)
if args.grayscale:
new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2GRAY)
new_image = np.expand_dims(new_image, axis=-1)
new_image = np.repeat(new_image, 3, axis=-1)
i += 1
num_bbox_in_image = int(fd_gt[i].strip())
i += 1
labels = []
for k in range(num_bbox_in_image):
label = fd_gt[i].strip()
label_parts = label.split(" ")
kitti_output = [0]*15
kitti_output[0] = "face"
kitti_output[2] = label_parts[8]
x1 = int(label_parts[0])
y1 = int(label_parts[1])
x2 = int(label_parts[2]) + x1
y2 = int(label_parts[3]) + y1
x1, y1, x2, y2 = adjust_box_coords(
x1, y1, x2, y2, scale, dx, dy, args.image_height, args.image_width)
kitti_output[4:8] = x1, y1, x2, y2
kitti_output = [str(x) for x in kitti_output]
labels.append(" ".join(kitti_output))
i += 1
if len(labels) != num_bbox_in_image:
print("Error parsing label, skipping")
continue
# save image and label
cv2.imwrite(os.path.join(target_img_path, image_prefix+".png"), new_image)
# save label
with open(os.path.join(target_label_path, image_prefix+".txt"), 'w') as f:
for item in labels:
f.write("%s\n" % item)
elif set(line.split(" ")) == {'0'}:
# no faces in image, continuing
i += 1
else:
print("Error parsing labels, Please check data")
return
print("Total {} samples in dataset".format(total_cnt))
if __name__ == "__main__":
main()
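# Example invocation (paths and file names are illustrative): letterboxes every
# annotated image to the requested size and writes the resized images and
# KITTI-format labels under <output_dir>/images and <output_dir>/labels.
#   python convert_wider_to_kitti.py --input_image_dir WIDER_train/images \
#       --input_label_file wider_face_train_bbx_gt.txt \
#       --output_dir /data/facenet --image_height 416 --image_width 416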
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/facenet/convert_wider_to_kitti.py |
import os
import shutil
from tqdm import tqdm
DATA_DIR=os.environ.get('LOCAL_DATA_DIR')
with open("imagenet_valprep.txt", "r") as f:
for line in tqdm(f):
img_name, dir_name = line.rstrip().split(" ")
target_dir = os.path.join(DATA_DIR, "imagenet", "val", dir_name)
os.makedirs(target_dir, exist_ok=True)
shutil.copyfile(os.path.join(DATA_DIR, "imagenet", "val", img_name), os.path.join(target_dir, img_name))
# This results in a validation directory like so:
#
# imagenet/val/
# ├── n01440764
# │   ├── ILSVRC2012_val_00000293.JPEG
# │   ├── ILSVRC2012_val_00002138.JPEG
# │   └── ......
# └── ......
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/classification_tf2/byom_voc/prepare_imagenet.py |
import os
from os.path import join as join_path
import re
import glob
import shutil
from random import shuffle
from tqdm import tqdm
DATA_DIR=os.environ.get('LOCAL_DATA_DIR')
source_dir_orig = join_path(DATA_DIR, "VOCdevkit/VOC2012")
target_dir_orig = join_path(DATA_DIR, "formatted")
suffix = '_trainval.txt'
classes_dir = join_path(source_dir_orig, "ImageSets", "Main")
images_dir = join_path(source_dir_orig, "JPEGImages")
classes_files = glob.glob(classes_dir+"/*"+suffix)
for file in classes_files:
# get the filename and make output class folder
classname = os.path.basename(file)
if classname.endswith(suffix):
classname = classname[:-len(suffix)]
target_dir_path = join_path(target_dir_orig, classname)
if not os.path.exists(target_dir_path):
os.makedirs(target_dir_path)
else:
continue
with open(file) as f:
content = f.readlines()
for line in content:
tokens = re.split(r'\s+', line)
if tokens[1] == '1':
# copy this image into target dir_path
target_file_path = join_path(target_dir_path, tokens[0] + '.jpg')
src_file_path = join_path(images_dir, tokens[0] + '.jpg')
shutil.copyfile(src_file_path, target_file_path)
SOURCE_DIR=os.path.join(DATA_DIR, 'formatted')
TARGET_DIR=os.path.join(DATA_DIR,'split')
# list dir
dir_list = next(os.walk(SOURCE_DIR))[1]
# for each dir, create a new dir in split
for dir_i in tqdm(dir_list):
newdir_train = os.path.join(TARGET_DIR, 'train', dir_i)
newdir_val = os.path.join(TARGET_DIR, 'val', dir_i)
newdir_test = os.path.join(TARGET_DIR, 'test', dir_i)
if not os.path.exists(newdir_train):
os.makedirs(newdir_train)
if not os.path.exists(newdir_val):
os.makedirs(newdir_val)
if not os.path.exists(newdir_test):
os.makedirs(newdir_test)
img_list = glob.glob(os.path.join(SOURCE_DIR, dir_i, '*.jpg'))
# shuffle data
shuffle(img_list)
for j in range(int(len(img_list) * 0.7)):
shutil.copyfile(img_list[j], os.path.join(TARGET_DIR, 'train', os.path.join(dir_i, os.path.basename(img_list[j]))))
for j in range(int(len(img_list) * 0.7), int(len(img_list)*0.8)):
shutil.copyfile(img_list[j], os.path.join(TARGET_DIR, 'val', os.path.join(dir_i, os.path.basename(img_list[j]))))
for j in range(int(len(img_list) * 0.8), len(img_list)):
shutil.copyfile(img_list[j], os.path.join(TARGET_DIR, 'test', os.path.join(dir_i, os.path.basename(img_list[j]))))
print('Done splitting dataset.') | tao_tutorials-main | notebooks/tao_launcher_starter_kit/classification_tf2/byom_voc/prepare_voc.py |
# Convert RGB images to (fake) 16-bit grayscale
import os
import numpy as np
from PIL import Image
from tqdm import tqdm
from os.path import join as join_path
def to16bit(images_dir, img_file, images_dir_16_bit):
image = Image.open(os.path.join(images_dir,img_file)).convert("L")
# shifted to the higher byte to get a fake 16-bit image
image_np = np.array(image) * 256
image16 = Image.fromarray(image_np.astype(np.uint32))
# save the converted image as a .png into the 16-bit output directory
img_file = os.path.splitext(img_file)[0] + '.png'
image16.save(os.path.join(images_dir_16_bit, img_file))
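# Note: multiplying the 8-bit grayscale values by 256 shifts them into the high
# byte (0..255 becomes 0..65280), which is what makes the saved PNG a "fake"
# 16-bit image.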
# Generate 16-bit grayscale images for train/val splits
DATA_DIR = os.environ.get('LOCAL_DATA_DIR')
os.makedirs(os.path.join(DATA_DIR, "training", "image_2_16bit_grayscale"), exist_ok=True)
source_dir = join_path(DATA_DIR, "VOCdevkit/VOC2012")
images_dir = join_path(source_dir, "JPEGImages")
images_dir_16_bit = images_dir.replace('JPEGImages','JPEGImages_16bit_grayscale')
os.makedirs(images_dir_16_bit, exist_ok=True)
for img_file in tqdm(os.listdir(images_dir)):
to16bit(images_dir,img_file,images_dir_16_bit)
im = Image.open(join_path(images_dir_16_bit,'2008_007890.png'))
print("size:",im.size)
print("mode:",im.mode)
print("format:",im.format)
print(np.array(im).astype(np.uint32).shape)
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/classification_tf2/tao_voc/prepare_16bit.py |
import os
from os.path import join as join_path
import re
import glob
import shutil
import sys
from random import shuffle
from tqdm import tqdm
DATA_DIR=os.environ.get('LOCAL_DATA_DIR')
source_dir_orig = join_path(DATA_DIR, "VOCdevkit/VOC2012")
target_dir_orig = join_path(DATA_DIR, "formatted")
suffix = '_trainval.txt'
classes_dir = join_path(source_dir_orig, "ImageSets", "Main")
images_dir = join_path(source_dir_orig, sys.argv[1])
if not os.path.exists(images_dir):
raise FileNotFoundError(f"{images_dir} does not exist. Please check your path again")
# 16 bit image has .png extension
extension = '.jpg'
if 'grayscale' in sys.argv[1]:
extension = '.png'
classes_files = glob.glob(classes_dir+"/*"+suffix)
for file in classes_files:
# get the filename and make output class folder
classname = os.path.basename(file)
if classname.endswith(suffix):
classname = classname[:-len(suffix)]
target_dir_path = join_path(target_dir_orig, classname)
if not os.path.exists(target_dir_path):
os.makedirs(target_dir_path)
else:
continue
with open(file) as f:
content = f.readlines()
for line in content:
tokens = re.split(r'\s+', line)
if tokens[1] == '1':
# copy this image into target dir_path
target_file_path = join_path(target_dir_path, tokens[0] + extension)
src_file_path = join_path(images_dir, tokens[0] + extension)
shutil.copyfile(src_file_path, target_file_path)
SOURCE_DIR=os.path.join(DATA_DIR, 'formatted')
TARGET_DIR=os.path.join(DATA_DIR, sys.argv[2])
# list dir
dir_list = next(os.walk(SOURCE_DIR))[1]
# for each dir, create a new dir in split
for dir_i in tqdm(dir_list):
newdir_train = os.path.join(TARGET_DIR, 'train', dir_i)
newdir_val = os.path.join(TARGET_DIR, 'val', dir_i)
newdir_test = os.path.join(TARGET_DIR, 'test', dir_i)
if not os.path.exists(newdir_train):
os.makedirs(newdir_train)
if not os.path.exists(newdir_val):
os.makedirs(newdir_val)
if not os.path.exists(newdir_test):
os.makedirs(newdir_test)
img_list = glob.glob(os.path.join(SOURCE_DIR, dir_i, f'*{extension}'))
# shuffle data
shuffle(img_list)
for j in range(int(len(img_list) * 0.7)):
shutil.copyfile(img_list[j], os.path.join(TARGET_DIR, 'train', os.path.join(dir_i, os.path.basename(img_list[j]))))
for j in range(int(len(img_list) * 0.7), int(len(img_list)*0.8)):
shutil.copyfile(img_list[j], os.path.join(TARGET_DIR, 'val', os.path.join(dir_i, os.path.basename(img_list[j]))))
for j in range(int(len(img_list) * 0.8), len(img_list)):
shutil.copyfile(img_list[j], os.path.join(TARGET_DIR, 'test', os.path.join(dir_i, os.path.basename(img_list[j]))))
print('Done splitting dataset.') | tao_tutorials-main | notebooks/tao_launcher_starter_kit/classification_tf2/tao_voc/prepare_voc.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prepare train/val dataset for LPRNet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for LPRNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to OpenALPR's benchmark end2end us license plates."
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to TLT train/eval dataset."
)
return parser.parse_args(args)
def prepare_data(input_dir, img_list, output_dir):
"""Crop the license plates from the orginal images."""
target_img_path = os.path.join(output_dir, "image")
target_label_path = os.path.join(output_dir, "label")
if not os.path.exists(target_img_path):
os.makedirs(target_img_path)
if not os.path.exists(target_label_path):
os.makedirs(target_label_path)
for img_name in img_list:
img_path = os.path.join(input_dir, img_name)
label_path = os.path.join(input_dir,
img_name.split(".")[0] + ".txt")
img = cv2.imread(img_path)
with open(label_path, "r") as f:
label_lines = f.readlines()
assert len(label_lines) == 1
label_items = label_lines[0].split()
assert img_name == label_items[0]
xmin = int(label_items[1])
ymin = int(label_items[2])
width = int(label_items[3])
xmax = xmin + width
height = int(label_items[4])
ymax = ymin + height
lp = label_items[5]
cropped_lp = img[ymin:ymax, xmin:xmax, :]
# save img and label
cv2.imwrite(os.path.join(target_img_path, img_name), cropped_lp)
with open(os.path.join(target_label_path,
img_name.split(".")[0] + ".txt"), "w") as f:
f.write(lp)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
img_files = []
for file_name in os.listdir(args.input_dir):
if file_name.split(".")[-1] == "jpg":
img_files.append(file_name)
total_cnt = len(img_files)
train_cnt = int(total_cnt / 2)
val_cnt = total_cnt - train_cnt
train_img_list = img_files[0:train_cnt]
val_img_list = img_files[train_cnt:]
print("Total {} samples in benchmark dataset".format(total_cnt))
print("{} for train and {} for val".format(train_cnt, val_cnt))
train_dir = os.path.join(args.output_dir, "train")
prepare_data(args.input_dir, train_img_list, train_dir)
val_dir = os.path.join(args.output_dir, "val")
prepare_data(args.input_dir, val_img_list, val_dir)
if __name__ == "__main__":
main()
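# Example invocation (paths are illustrative): crops the license-plate region
# from each OpenALPR benchmark image and writes image/label pairs for LPRNet,
# with roughly half of the data under train/ and the rest under val/.
#   python preprocess_openalpr_benchmark.py --input_dir benchmarks/endtoend/us \
#       --output_dir /data/openalpr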
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/lprnet/preprocess_openalpr_benchmark.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FPENet data conversion utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import cv2
import os
import numpy as np
import json
def get_keypoints_from_file(keypoints_file):
'''
Read a keypoints (.pts) file in the AFW format.
Input:
keypoints_file (str): Path to the keypoints file.
Output:
keypoints (np.array): Keypoints in numpy format [[x, y], [x, y]].
'''
keypoints = []
with open(keypoints_file) as fid:
for line in fid:
if "version" in line or "points" in line or "{" in line or "}" in line:
continue
else:
loc_x, loc_y = line.strip().split(sep=" ")
keypoints.append([float(loc_x), float(loc_y)])
keypoints = np.array(keypoints, dtype=float)
assert keypoints.shape[1] == 2, "Keypoints should be 2D."
return keypoints
def convert_dataset(container_root_path, afw_data_path, output_json_path, afw_image_save_path, key_points=80):
'''
Function to convert afw dataset to Sloth format json.
Input:
container_root_path (str): Root path of the data as mounted inside the container.
afw_data_path (str): Path to afw data folder.
output_json_path (str): Path to output json file.
afw_image_save_path (str): Image paths to use in json.
key_points (int): Number of keypoints to generate (80 or 10).
Returns:
None
'''
# get dataset file lists
all_files = os.listdir(afw_data_path)
images = [x for x in all_files if x.endswith('.jpg')]
# keypoint_files = [img_path.split(".")[-2] + ".pts" for img_path in images]
output_folder = os.path.dirname(output_json_path)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# read and convert to sloth format
sloth_data = []
for image in images:
image_path = os.path.join(afw_data_path, image)
image_read = cv2.imread(image_path)
if image_read is None:
print('Bad image:{}'.format(image_path))
continue
# convert image to png
image_png = image.replace('.jpg', '.png')
cv2.imwrite(os.path.join(afw_data_path, image_png), image_read)
image_data = {}
image_data['filename'] = os.path.join(container_root_path, "data/afw", image_png)
image_data['class'] = 'image'
annotations = {}
annotations['tool-version'] = '1.0'
annotations['version'] = 'v1'
annotations['class'] = 'FiducialPoints'
keypoint_file = image.split(".")[-2] + ".pts"
image_keypoints = get_keypoints_from_file(os.path.join(afw_data_path, keypoint_file))
if key_points == 80:
for num, keypoint in enumerate(image_keypoints):
annotations["P{}x".format(num + 1)] = keypoint[0]
annotations["P{}y".format(num + 1)] = keypoint[1]
# fill in dummy keypoints for keypoints 69 to 80
for num in range(69, 81, 1):
annotations["P{}x".format(num)] = image_keypoints[0][0]
annotations["P{}y".format(num)] = image_keypoints[0][1]
annotations["P{}occluded".format(num)] = True
elif key_points == 10:
key_id = 1
for num, keypoint in enumerate(image_keypoints):
# change to 10-points dataset:
if (num + 1) in [1, 9, 17, 20, 25, 39, 45, 34, 49, 55]:
annotations["P{}x".format(key_id)] = keypoint[0]
annotations["P{}y".format(key_id)] = keypoint[1]
key_id += 1
else:
raise ValueError("This script only generates 10 & 80 keypoints dataset.")
image_data['annotations'] = [annotations]
sloth_data.append(image_data)
# save json
with open(output_json_path, "w") as config_file:
json.dump(sloth_data, config_file, indent=4)
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(
description='Transform dataset for FPENet tutorial')
parser.add_argument(
"--afw_data_path",
type=str,
required=True,
help="Input directory to AFW dataset imnages and ground truth keypoints."
)
parser.add_argument(
"--container_root_path",
type=str,
required=True,
help="Path of image folder with respect to the container"
)
parser.add_argument(
"--output_json_path",
type=str,
required=True,
help="Output json file path to save to."
)
parser.add_argument(
"--afw_image_save_path",
type=str,
required=True,
help="Image path to use in jsons."
)
parser.add_argument(
"--num_key_points",
type=int,
default=80,
help="Number of key points."
)
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args()
convert_dataset(args.container_root_path, args.afw_data_path, args.output_json_path, args.afw_image_save_path, args.num_key_points)
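# Example invocation (paths are illustrative): converts AFW images plus their
# ".pts" keypoint files into a Sloth-style JSON with 80 (default) or 10 points.
#   python data_utils.py --afw_data_path /data/afw \
#       --container_root_path /workspace/tao-experiments \
#       --output_json_path /data/afw/afw.json --afw_image_save_path /data/afw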
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/fpenet/data_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Converts Retail Product Checkout (https://www.kaggle.com/datasets/diyer22/retail-product-checkout-dataset) dataset to classification dataset. Ready for MLRecogNet training.
"""
import os, zipfile
import glob
import cv2
from pycocotools.coco import COCO
from tqdm import tqdm
import numpy as np
import shutil
def create_reference_set(dataset_dir, ref_dir, ref_num = 100):
os.makedirs(ref_dir, exist_ok=True)
classes = os.listdir(dataset_dir)
print(f"Creating reference set from {dataset_dir}...")
for class_name in tqdm(classes):
samples = os.listdir(os.path.join(dataset_dir, class_name))
if not os.path.exists(os.path.join(ref_dir, class_name)):
os.makedirs(os.path.join(ref_dir, class_name))
if len(samples) >= ref_num:
ref_samples = np.random.choice(samples, ref_num, replace=False)
else:
print(f"Warning: {class_name} has only {len(samples)} samples. Copying all samples to reference set.")
ref_samples = samples
for sample in ref_samples:
try:
shutil.copy(os.path.join(dataset_dir, class_name, sample), os.path.join(ref_dir, class_name, sample))
except:
pass
print("Done!")
def crop_images(file_path, bbox, class_id, output_dir):
file_name = os.path.basename(file_path)
class_folder = os.path.join(output_dir, class_id)
if not os.path.exists(class_folder):
os.mkdir(class_folder)
image_count = len(glob.glob( os.path.join(class_folder, file_name+"*.jpg")))
new_file_name = os.path.join(class_folder, file_name + f"_{image_count+1}.jpg")
if os.path.exists(new_file_name):
# skip if file already exists
return
# start processing image
x1, y1, x2, y2 = bbox
# skip if bbox is too small
if x2 < 120 or y2 < 150:
return
try:
image = cv2.imread(file_path)
h, w, _ = image.shape
except:
print(f"{file_path} is not a valid image file")
return
# bbox is COCO-style [x, y, width, height]; add a ~7% margin on each side (14% total)
cropped_image = image[max(int(y1 - 0.07*y2), 0 ):min(int(y1+1.07*y2), h), \
max(int(x1 - 0.07*x2), 0 ):min(int(x1+1.07*x2), w)]
# resize to 256x256 for faster processing and training
resized_cropped_image = cv2.resize(cropped_image, (256, 256), interpolation=cv2.INTER_AREA)
cv2.imwrite(os.path.join(class_folder, new_file_name), resized_cropped_image)
# load dataset
data_root_dir = os.path.join(os.environ['DATA_DIR'],"metric_learning_recognition")
path_to_zip_file = os.path.join(data_root_dir,"retail-product-checkout-dataset.zip")
directory_to_extract_to = os.path.join(data_root_dir, "retail-product-checkout-dataset")
processed_classification_dir = os.path.join(data_root_dir,"retail-product-checkout-dataset_classification_demo")
## unzip dataset
if not os.path.exists(processed_classification_dir):
os.makedirs(processed_classification_dir)
print("Unzipping dataset...")
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(directory_to_extract_to)
directory_to_extract_to = os.path.join(directory_to_extract_to, "retail_product_checkout")
for dataset in ["train", "val", "test"]:
dataset_dir = os.path.join(directory_to_extract_to, dataset+"2019")
annotation_file = os.path.join(directory_to_extract_to, "instances_"+dataset+"2019.json")
output_dir = os.path.join(processed_classification_dir, dataset)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
## load coco dataset
print(f"Loading COCO {dataset} dataset...")
coco_label = COCO(annotation_file)
# crop images to classification data
for img_object in tqdm(coco_label.dataset["images"]):
image_path = os.path.join(dataset_dir, img_object["file_name"])
# remove top view images
if "camera2" in image_path:
continue
image_id = img_object["id"]
annotation_ids = coco_label.getAnnIds(imgIds=image_id)
for annot in coco_label.loadAnns(annotation_ids):
bbox = annot["bbox"]
class_id = annot["category_id"]
category = coco_label.loadCats(class_id)[0]
class_name = category["supercategory"] + "_" + category["name"]
crop_images(image_path, bbox, class_name, output_dir)
# extract a reference set from training set
## fixed random seed for reproducibility
np.random.seed(0)
create_reference_set(
os.path.join(processed_classification_dir, "train"), \
os.path.join(processed_classification_dir, "reference"), \
ref_num=100)
# split out unknown classes
# select 20% classes as unknown classes
class_list = os.listdir(os.path.join(processed_classification_dir, "train"))
total_class_num = len(class_list)
unknown_classes = np.random.choice(class_list, int(total_class_num*0.2), replace=False)
known_classes = [c for c in class_list if c not in unknown_classes]
known_classes_dir = os.path.join(processed_classification_dir, "known_classes")
unknown_classes_dir = os.path.join(processed_classification_dir, "unknown_classes")
for dataset in ["train", "val", "test", "reference"]:
known_classes_dataset_dir = os.path.join(known_classes_dir, dataset)
unknown_classes_dataset_dir = os.path.join(unknown_classes_dir, dataset)
if not os.path.exists(known_classes_dataset_dir):
os.makedirs(known_classes_dataset_dir)
if not os.path.exists(unknown_classes_dataset_dir):
os.makedirs(unknown_classes_dataset_dir)
for class_name in tqdm(known_classes):
class_dir = os.path.join(processed_classification_dir, dataset, class_name)
os.rename(class_dir, os.path.join(known_classes_dataset_dir, class_name))
for class_name in tqdm(unknown_classes):
class_dir = os.path.join(processed_classification_dir, dataset, class_name)
os.rename(class_dir, os.path.join(unknown_classes_dataset_dir, class_name))
# remove old folders
for dataset in ["train", "val", "test", "reference"]:
shutil.rmtree(os.path.join(processed_classification_dir, dataset))
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/metric_learning_recognition/process_retail_product_checkout_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import cv2
import csv
import ujson
classes = set([])
def read_kitti(prefix, label_file):
"Function wrapper to read kitti format labels txt file."
global classes
full_label_path = os.path.join(prefix, label_file)
if not full_label_path.endswith(".txt"):
return
if not os.path.exists(full_label_path):
raise ValueError("Labelfile : {} does not exist".format(full_label_path))
if os.path.isdir(full_label_path):
return
dict_list = []
image_name = full_label_path.replace("/labels", "/images").replace(".txt", ".jpg")
if not os.path.exists(image_name):
raise ValueError("Image : {} does not exist".format(image_name))
img = cv2.imread(image_name, 0)
height, width = img.shape[:2]
with open(full_label_path, 'r') as lf:
for row in csv.reader(lf, delimiter=' '):
classes.add(row[0])
dict_list.append({"class_name": row[0],
"file_name": label_file.replace(".txt", ".jpg"),
"height": height,
"width": width,
"bbox": [float(row[4]), float(row[5]), float(row[6]) - float(row[4]), float(row[7]) - float(row[5])]})
if (dict_list == []):
dict_list = [{"file_name": label_file.replace(".txt", ".jpg"),
"height": height,
"width": width}]
return dict_list
def construct_coco_json(labels_folder):
image_id = 0
annot_ctr = 0
labels = []
for file in os.listdir(labels_folder):
label = read_kitti(labels_folder, file)
labels.append(label)
categories = []
class_to_id_mapping = {}
for idx, object_class in enumerate(classes):
class_to_id_mapping[object_class] = idx + 1
categories.append({"supercategory": object_class, "id": idx + 1, "name": object_class})
coco_json = {"images": [], "annotations": [], "categories": categories}
for label in labels:
if not (label and len(label)):
continue
coco_json["images"].append({"file_name": label[0]["file_name"], "height": label[0]["height"], "width": label[0]["width"], "id": image_id})
for instance in label:
if ("bbox" in instance.keys()):
coco_json["annotations"].append({"bbox": instance["bbox"],
"image_id": image_id,
"id": annot_ctr,
"category_id": class_to_id_mapping[instance["class_name"]],
"bbox_mode": 1,
"segmentation": [],
"iscrowd": 0,
"area": float(instance["bbox"][2] * instance["bbox"][3])})
annot_ctr += 1
image_id += 1
return coco_json
label_folder = sys.argv[1]
coco_json = construct_coco_json(label_folder)
current_str = ujson.dumps(coco_json, indent=4)
with open(sys.argv[2] + "/annotations.json", "w") as json_out_file:
json_out_file.write(current_str)
label_map_extension = sys.argv[3]
with open(f"{sys.argv[2]}/label_map.{label_map_extension}", "w") as label_map_file:
for idx, class_name in enumerate(classes):
if label_map_extension == "yaml":
label_map_file.write(f"{idx+1}: '{class_name}'\n")
else:
label_map_file.write(f"{class_name}\n")
label_map_file.flush()
print(len(classes))
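# Example invocation (paths are illustrative): reads KITTI .txt labels (class in
# column 1, bbox corners in columns 5-8), looks up the matching .jpg under the
# sibling "images" folder, and writes annotations.json plus label_map.<yaml|txt>.
#   python kitti_to_coco.py /data/training/labels /data/training yaml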
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/kitti/kitti_to_coco.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calibration of KITTI dataset."""
import numpy as np
def get_calib_from_file(calib_file):
"""Get calibration from file."""
with open(calib_file) as f:
lines = f.readlines()
obj = lines[2].strip().split(' ')[1:]
P2 = np.array(obj, dtype=np.float32)
obj = lines[3].strip().split(' ')[1:]
P3 = np.array(obj, dtype=np.float32)
obj = lines[4].strip().split(' ')[1:]
R0 = np.array(obj, dtype=np.float32)
obj = lines[5].strip().split(' ')[1:]
Tr_velo_to_cam = np.array(obj, dtype=np.float32)
return {'P2': P2.reshape(3, 4),
'P3': P3.reshape(3, 4),
'R0': R0.reshape(3, 3),
'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
class Calibration(object):
"""Calibration class."""
def __init__(self, calib_file):
"""Initialize."""
if not isinstance(calib_file, dict):
calib = get_calib_from_file(calib_file)
else:
calib = calib_file
self.P2 = calib['P2'] # 3 x 4
self.R0 = calib['R0'] # 3 x 3
self.V2C = calib['Tr_velo2cam'] # 3 x 4
# Camera intrinsics and extrinsics
self.cu = self.P2[0, 2]
self.cv = self.P2[1, 2]
self.fu = self.P2[0, 0]
self.fv = self.P2[1, 1]
self.tx = self.P2[0, 3] / (-self.fu)
self.ty = self.P2[1, 3] / (-self.fv)
def cart_to_hom(self, pts):
"""
:param pts: (N, 3 or 2)
:return pts_hom: (N, 4 or 3)
"""
pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
return pts_hom
def rect_to_lidar(self, pts_rect):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_rect_hom = self.cart_to_hom(pts_rect) # (N, 4)
R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)
R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
R0_ext[3, 3] = 1
V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
V2C_ext[3, 3] = 1
pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
return pts_lidar[:, 0:3]
def lidar_to_rect(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_lidar_hom = self.cart_to_hom(pts_lidar)
pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
# pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
return pts_rect
def rect_to_img(self, pts_rect):
"""
:param pts_rect: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect_hom = self.cart_to_hom(pts_rect)
pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)
pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord
return pts_img, pts_rect_depth
def lidar_to_img(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect = self.lidar_to_rect(pts_lidar)
pts_img, pts_depth = self.rect_to_img(pts_rect)
return pts_img, pts_depth
def img_to_rect(self, u, v, depth_rect):
"""
:param u: (N)
:param v: (N)
:param depth_rect: (N)
:return:
"""
x = ((u - self.cu) * depth_rect) / self.fu + self.tx
y = ((v - self.cv) * depth_rect) / self.fv + self.ty
pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
return pts_rect
def corners3d_to_img_boxes(self, corners3d):
"""
:param corners3d: (N, 8, 3) corners in rect coordinate
:return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
:return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
"""
sample_num = corners3d.shape[0]
corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 4)
img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)
x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
return boxes, boxes_corner
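# Minimal usage sketch (illustrative only; the calibration file name and the point/corner
# arrays below are assumptions, not part of this module):
#   calib = Calibration("000000.txt")
#   pts_img, depth = calib.lidar_to_img(lidar_points[:, :3])   # project LiDAR points to pixels
#   boxes, _ = calib.corners3d_to_img_boxes(corners3d)          # (N, 8, 3) corners -> 2D boxes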
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/pointpillars/calibration_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import sys
def drop_class(label_dir, classes):
"""drop label by class names."""
labels = os.listdir(label_dir)
labels = [os.path.join(label_dir, x) for x in labels]
for gt in labels:
print("Processing ", gt)
with open(gt) as f:
lines = f.readlines()
lines_ret = []
for line in lines:
ls = line.strip()
line = ls.split()
if line[0] in classes:
print("Dropping ", line[0])
continue
else:
lines_ret.append(ls)
with open(gt, "w") as fo:
out = '\n'.join(lines_ret)
fo.write(out)
if __name__ == "__main__":
drop_class(sys.argv[1], sys.argv[2].split(','))
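# Example invocation (the path and class names are illustrative assumptions):
#   python drop_class.py /path/to/kitti/labels Van,Cyclist
# Each label file is rewritten in place, keeping only lines whose class is not listed.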
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/pointpillars/drop_class.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import argparse
import numpy as np
from object3d_kitti import get_objects_from_label
from calibration_kitti import Calibration
def parse_args():
parser = argparse.ArgumentParser("Convert camera label to LiDAR label.")
parser.add_argument(
"-l", "--label_dir",
type=str, required=True,
help="Camera label directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR label directory"
)
return parser.parse_args()
def generate_lidar_labels(label_dir, calib_dir, output_dir):
"""Generate LiDAR labels from KITTI Camera labels."""
for lab in os.listdir(label_dir):
lab_file = os.path.join(label_dir, lab)
obj_list = get_objects_from_label(lab_file)
calib_file = os.path.join(calib_dir, lab)
calib = Calibration(calib_file)
loc = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
loc_lidar = calib.rect_to_lidar(loc)
# update obj3d.loc
with open(os.path.join(output_dir, lab), "w") as lf:
for idx, lc in enumerate(loc_lidar):
# bottom center to 3D center
obj_list[idx].loc = (lc + np.array([0., 0., obj_list[idx].h / 2.]))
# rotation_y to rotation_z
obj_list[idx].ry = -np.pi / 2. - obj_list[idx].ry
lf.write(obj_list[idx].to_kitti_format())
lf.write('\n')
if __name__ == "__main__":
args = parse_args()
generate_lidar_labels(args.label_dir, args.calib_dir, args.output_dir)
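# Example invocation (directory names are illustrative assumptions):
#   python gen_lidar_labels.py -l label_2 -c calib -o label_lidar
# Each camera-frame KITTI label is rewritten with a LiDAR-frame 3D center and yaw angle.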
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/pointpillars/gen_lidar_labels.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import sys
def split(list_file, lidar, label, output_lidar, output_label):
"""train/val split of the KITTI dataset."""
with open(list_file) as lf:
file_names = lf.readlines()
file_names = [f.strip() for f in file_names]
for li in os.listdir(lidar):
if li[:-4] in file_names:
os.rename(os.path.join(lidar, li), os.path.join(output_lidar, li))
for la in os.listdir(label):
if la[:-4] in file_names:
os.rename(os.path.join(label, la), os.path.join(output_label, la))
if __name__ == "__main__":
split(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
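# Example invocation (file and directory names are illustrative assumptions):
#   python kitti_split.py val.txt velodyne label_lidar velodyne_val label_lidar_val
# Files listed in val.txt are moved (os.rename) out of the source folders into the output folders.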
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/pointpillars/kitti_split.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""3D object KITTI utils."""
import numpy as np
def get_objects_from_label(label_file):
"""Get objects from label."""
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [Object3d(line) for line in lines]
return objects
def cls_type_to_id(cls_type):
"""Convert class type to ID."""
type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
if cls_type not in type_to_id.keys():
return -1
return type_to_id[cls_type]
class Object3d(object):
"""Object3d class."""
def __init__(self, line):
"""Initialize."""
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.cls_id = cls_type_to_id(self.cls_type)
self.truncation = float(label[1])
self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[8])
self.w = float(label[9])
self.l = float(label[10]) # noqa: E741
self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
self.dis_to_cam = np.linalg.norm(self.loc)
self.ry = float(label[14])
self.score = float(label[15]) if label.__len__() == 16 else -1.0
self.level_str = None
self.level = self.get_kitti_obj_level()
def get_kitti_obj_level(self):
"""Get KITTI object difficult level."""
height = float(self.box2d[3]) - float(self.box2d[1]) + 1
if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
self.level_str = 'Easy'
return 0 # Easy
if height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
self.level_str = 'Moderate'
return 1 # Moderate
if height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
self.level_str = 'Hard'
return 2 # Hard
self.level_str = 'UnKnown'
return -1
def generate_corners3d(self):
"""
generate corners3d representation for this object
:return corners_3d: (8, 3) corners of box3d in camera coord
"""
l, h, w = self.l, self.h, self.w
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
[0, 1, 0],
[-np.sin(self.ry), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.loc
return corners3d
def to_str(self):
"""Convert to string."""
print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
% (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l, self.loc, self.ry)
return print_str
def to_kitti_format(self):
"""Convert to KITTI format."""
kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
% (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2], self.ry)
return kitti_str
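# Example KITTI label line parsed by Object3d (fields: type, truncation, occlusion, alpha,
# 2D bbox [x1 y1 x2 y2], dimensions [h w l], location [x y z], rotation_y); values are illustrative:
#   line = "Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59"
#   obj = Object3d(line); corners = obj.generate_corners3d()  # (8, 3) corners in camera coordinates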
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/pointpillars/object3d_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import argparse
import numpy as np
from skimage import io
from calibration_kitti import Calibration
def parse_args():
parser = argparse.ArgumentParser("Limit LIDAR points to FOV range.")
parser.add_argument(
"-p", "--points_dir",
type=str, required=True,
help="LIDAR points directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR points directory"
)
parser.add_argument(
"-i",
"--image_dir",
type=str, required=True,
help="image directory"
)
return parser.parse_args()
def get_fov_flag(pts_rect, img_shape, calib):
    """Return a boolean mask of the rect-frame points that project inside the image with non-negative depth."""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def generate_lidar_points(points_dir, calib_dir, output_dir, image_dir):
"""Limit LiDAR points to FOV range."""
for pts in os.listdir(points_dir):
pts_file = os.path.join(points_dir, pts)
points = np.fromfile(pts_file, dtype=np.float32).reshape(-1, 4)
calib_file = os.path.join(calib_dir, pts[:-4] + ".txt")
calib = Calibration(calib_file)
pts_rect = calib.lidar_to_rect(points[:, 0:3])
img_file = os.path.join(image_dir, pts[:-4] + ".png")
img_shape = np.array(io.imread(img_file).shape[:2], dtype=np.int32)
fov_flag = get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
points.tofile(os.path.join(output_dir, pts))
# double check
points_cp = np.fromfile(os.path.join(output_dir, pts), dtype=np.float32).reshape(-1, 4)
assert np.equal(points, points_cp).all()
if __name__ == "__main__":
args = parse_args()
generate_lidar_points(
args.points_dir, args.calib_dir,
args.output_dir, args.image_dir
)
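# Example invocation (directory names are illustrative assumptions):
#   python gen_lidar_points.py -p velodyne -c calib -o velodyne_fov -i image_2
# Points that fall outside the camera field of view are dropped before the .bin files are rewritten.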
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/pointpillars/gen_lidar_points.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prepare train/val dataset for Unet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
from PIL import Image, ImageSequence
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for UNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to ISBI Tiff Files"
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to processes images from ISBI Tiff files."
)
return parser.parse_args(args)
def save_arrays_as_images(arrays, im_dir):
    """Utility function to save the images to a directory from a list of arrays."""
    for idx, arr in enumerate(arrays):
img_name = os.path.join(im_dir, "image_{}.png".format(idx))
cv2.imwrite(img_name, arr)
def load_multipage_tiff(path):
"""Load tiff images containing many images in the channel dimension"""
return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])
def check_and_create(d):
"""Utility function to create a dir if not present"""
if not os.path.isdir(d):
os.makedirs(d)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
train_images_tif = os.path.join(args.input_dir, "train-volume.tif")
train_masks_tif = os.path.join(args.input_dir, "train-labels.tif")
test_images_tif = os.path.join(args.input_dir, "test-volume.tif")
output_images_dir = os.path.join(args.output_dir, "images")
output_masks_dir = os.path.join(args.output_dir, "masks")
# Creating the images dir for train, test, val
train_images_dir = os.path.join(output_images_dir, "train")
val_images_dir = os.path.join(output_images_dir, "val")
test_images_dir = os.path.join(output_images_dir, "test")
train_masks_dir = os.path.join(output_masks_dir, "train")
val_masks_dir = os.path.join(output_masks_dir, "val")
check_and_create(train_images_dir)
check_and_create(val_images_dir)
check_and_create(test_images_dir)
check_and_create(train_masks_dir)
check_and_create(val_masks_dir)
train_np_arrays_images = load_multipage_tiff(train_images_tif)
train_np_arrays_masks = load_multipage_tiff(train_masks_tif)
test_np_arrays_images = load_multipage_tiff(test_images_tif)
# Splitting the train numpy arrays into train and val
train_np_arrays_images_final = train_np_arrays_images[:20, :, :]
train_np_arrays_masks_final = train_np_arrays_masks[:20, :, :]
val_np_arrays_images_final = train_np_arrays_images[20:, :, :]
val_np_arrays_masks_final = train_np_arrays_masks[20:, :, :]
# Saving the train arrays as images
save_arrays_as_images(train_np_arrays_images_final, train_images_dir)
save_arrays_as_images(train_np_arrays_masks_final, train_masks_dir)
# Saving the val arrays as images
save_arrays_as_images(val_np_arrays_images_final, val_images_dir)
save_arrays_as_images(val_np_arrays_masks_final, val_masks_dir)
# Saving the test arrays as images
save_arrays_as_images(test_np_arrays_images, test_images_dir)
print("Prepared data successfully !")
if __name__ == "__main__":
main()
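# Example invocation (paths are illustrative assumptions):
#   python prepare_data_isbi.py --input_dir /data/isbi --output_dir /data/isbi_processed
# The first 20 slices of the train volume become the train split, the rest become val,
# and the test volume is exported as-is, all as PNG files.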
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/unet/prepare_data_isbi.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import re
import random
import sys
def sample_dataset(input_dir, output_dir, n_samples, use_ids=None):
"""Select a subset of images fom input_dir and move them to output_dir.
Args:
input_dir (str): Input Folder Path of the train images.
output_dir (str): Output Folder Path of the test images.
n_samples (int): Number of samples to use.
use_ids(list int): List of IDs to grab from test and query folder.
Returns:
IDs used for sampling
"""
img_paths = glob.glob(os.path.join(input_dir, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
id_to_img = {}
# Grab images with matching ids
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid not in id_to_img:
id_to_img[pid] = []
id_to_img[pid].append(img_path)
# Create directory
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
command = "rm -r " + output_dir
os.system(command)
os.makedirs(output_dir)
assert id_to_img, "Dataset size cannot be 0."
sampled_id_to_img = dict(random.sample(list(id_to_img.items()), n_samples))
for key, img_paths in sampled_id_to_img.items():
for img_path in img_paths:
command = "cp " + img_path + " " + output_dir
os.system(command)
# Use same ids for test and query
if use_ids:
# Create query dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
command = "rm -r " + output_dir
os.system(command)
os.makedirs(output_dir)
# Find images in test with same id
img_paths = glob.glob(os.path.join(input_dir, '*.jpg'))
for id in use_ids:
pattern = re.compile(r'([-\d]+)_c(\d)')
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if id == pid:
print(img_path)
command = "cp " + img_path + " " + output_dir
os.system(command)
return sampled_id_to_img.keys()
# Number of samples
n_samples = int(sys.argv[1])
data_dir = os.path.join(os.environ["DATA_DIR"], "market1501")
# Create train dataset
train_input_dir = os.path.join(data_dir, "bounding_box_train")
train_output_dir = os.path.join(data_dir, "sample_train")
sample_dataset(train_input_dir, train_output_dir, n_samples)
# Create test dataset
test_input_dir = os.path.join(data_dir, "bounding_box_test")
test_output_dir = os.path.join(data_dir, "sample_test")
ids = sample_dataset(test_input_dir, test_output_dir, n_samples)
# Create query dataset
query_input_dir = os.path.join(data_dir, "query")
query_output_dir = os.path.join(data_dir, "sample_query")
sample_dataset(query_input_dir, query_output_dir, n_samples, ids)
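# Example invocation (assumes $DATA_DIR/market1501 contains the extracted Market-1501 folders):
#   python obtain_subset_data.py 100
# Samples 100 identities from bounding_box_train/bounding_box_test into sample_train/sample_test,
# then copies the matching identities from query/ into sample_query.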
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/re_identification/obtain_subset_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import numpy as np
data_dir = os.path.join(os.environ["DATA_DIR"], "kinetics")
# front_raises: 134
# pull_ups: 255
# clean_and_jerk: 59
# presenting_weather_forecast: 254
# deadlifting: 88
selected_actions = {
134: 0,
255: 1,
59: 2,
254: 3,
88: 4
}
def select_actions(selected_actions, data_dir, split_name):
"""Select a subset of actions and their corresponding labels.
Args:
selected_actions (dict): Map from selected class IDs to new class IDs.
data_dir (str): Path to the directory of data arrays (.npy) and labels (.pkl).
split_name (str): Name of the split to be processed, e.g., "train" and "val".
Returns:
No explicit returns
"""
data_path = os.path.join(data_dir, f"{split_name}_data.npy")
label_path = os.path.join(data_dir, f"{split_name}_label.pkl")
data_array = np.load(file=data_path)
with open(label_path, "rb") as label_file:
labels = pickle.load(label_file)
assert (len(labels) == 2)
assert (data_array.shape[0] == len(labels[0]))
assert (len(labels[0]) == len(labels[1]))
print(f"No. total samples for {split_name}: {data_array.shape[0]}")
selected_indices = []
for i in range(data_array.shape[0]):
if labels[1][i] in selected_actions.keys():
selected_indices.append(i)
data_array = data_array[selected_indices, :, :, :, :]
selected_sample_names = [labels[0][x] for x in selected_indices]
selected_labels = [selected_actions[labels[1][x]] for x in selected_indices]
labels = (selected_sample_names, selected_labels)
print(f"No. selected samples for {split_name}: {data_array.shape[0]}")
np.save(file=data_path, arr=data_array, allow_pickle=False)
with open(label_path, "wb") as label_file:
pickle.dump(labels, label_file, protocol=4)
select_actions(selected_actions, data_dir, "train")
select_actions(selected_actions, data_dir, "val")
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/pose_classification/select_subset_actions.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Clean labels to alphanumeric, case-insensitive (lower case), and filter out labels longer than 25 characters.
import os
import re
import sys
from tqdm import tqdm
def preprocess_label(gt_file, filtered_file):
gt_list = open(gt_file, "r").readlines()
filtered_list = []
character_list = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
for label_line in tqdm(gt_list):
try:
path, label = label_line.strip().split()
except Exception:
continue
path = path[:-1]
label = label.strip("\"")
if re.search(f"[^{character_list}]", label):
continue
else:
if len(label) <= 25:
label = label.lower() # ignore the case
filtered_list.append(f"{path}\t{label}\n")
with open(filtered_file, "w") as f:
f.writelines(filtered_list)
def main():
preprocess_label(sys.argv[1], sys.argv[2])
character_list = "0123456789abcdefghijklmnopqrstuvwxyz"
with open(os.path.join(os.getenv("DATA_DIR"), "character_list"), "w") as f:
for ch in character_list:
f.write(f"{ch}\n")
if __name__ == "__main__":
    main()
| tao_tutorials-main | notebooks/tao_api_starter_kit/api/dataset_prepare/ocrnet/preprocess_label.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prepare train/val dataset for LPRNet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for LPRNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to OpenALPR's benchmark end2end us license plates."
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to TLT train/eval dataset."
)
return parser.parse_args(args)
def prepare_data(input_dir, img_list, output_dir):
"""Crop the license plates from the orginal images."""
target_img_path = os.path.join(output_dir, "image")
target_label_path = os.path.join(output_dir, "label")
if not os.path.exists(target_img_path):
os.makedirs(target_img_path)
if not os.path.exists(target_label_path):
os.makedirs(target_label_path)
for img_name in img_list:
img_path = os.path.join(input_dir, img_name)
label_path = os.path.join(input_dir,
img_name.split(".")[0] + ".txt")
img = cv2.imread(img_path)
with open(label_path, "r") as f:
label_lines = f.readlines()
assert len(label_lines) == 1
label_items = label_lines[0].split()
assert img_name == label_items[0]
xmin = int(label_items[1])
ymin = int(label_items[2])
width = int(label_items[3])
xmax = xmin + width
height = int(label_items[4])
ymax = ymin + height
lp = label_items[5]
cropped_lp = img[ymin:ymax, xmin:xmax, :]
# save img and label
cv2.imwrite(os.path.join(target_img_path, img_name), cropped_lp)
with open(os.path.join(target_label_path,
img_name.split(".")[0] + ".txt"), "w") as f:
f.write(lp)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
img_files = []
for file_name in os.listdir(args.input_dir):
if file_name.split(".")[-1] == "jpg":
img_files.append(file_name)
total_cnt = len(img_files)
train_cnt = int(total_cnt / 2)
val_cnt = total_cnt - train_cnt
train_img_list = img_files[0:train_cnt]
    # the remaining images go to the val split
    val_img_list = img_files[train_cnt:]
print("Total {} samples in benchmark dataset".format(total_cnt))
print("{} for train and {} for val".format(train_cnt, val_cnt))
train_dir = os.path.join(args.output_dir, "train")
prepare_data(args.input_dir, train_img_list, train_dir)
val_dir = os.path.join(args.output_dir, "val")
prepare_data(args.input_dir, val_img_list, val_dir)
if __name__ == "__main__":
main()
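# Example invocation (paths are illustrative assumptions):
#   python preprocess_openalpr_benchmark.py --input_dir benchmarks/endtoend/us --output_dir data/openalpr
# Each plate is cropped from its source image and written under <output_dir>/{train,val}/{image,label}.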
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/lprnet/preprocess_openalpr_benchmark.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FPENet data conversion utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import cv2
import os
import numpy as np
import json
def get_keypoints_from_file(keypoints_file):
'''
This function reads the keypoints file from afw format.
Input:
keypoints_file (str): Path to the keypoints file.
Output:
keypoints (np.array): Keypoints in numpy format [[x, y], [x, y]].
'''
keypoints = []
with open(keypoints_file) as fid:
for line in fid:
if "version" in line or "points" in line or "{" in line or "}" in line:
continue
else:
loc_x, loc_y = line.strip().split(sep=" ")
keypoints.append([float(loc_x), float(loc_y)])
keypoints = np.array(keypoints, dtype=float)
assert keypoints.shape[1] == 2, "Keypoints should be 2D."
return keypoints
def convert_dataset(container_root_path, afw_data_path, output_json_path, afw_image_save_path, key_points=80):
'''
Function to convert afw dataset to Sloth format json.
Input:
        container_root_path (str): Root path of the data inside the container (used to build image paths in the json).
        afw_data_path (str): Path to afw data folder.
        output_json_path (str): Path to output json file.
        afw_image_save_path (str): Image paths to use in json.
        key_points (int): Number of keypoints to export (80 or 10).
Returns:
None
'''
# get dataset file lists
all_files = os.listdir(afw_data_path)
images = [x for x in all_files if x.endswith('.jpg')]
# keypoint_files = [img_path.split(".")[-2] + ".pts" for img_path in images]
output_folder = os.path.dirname(output_json_path)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# read and convert to sloth format
sloth_data = []
for image in images:
image_path = os.path.join(afw_data_path, image)
image_read = cv2.imread(image_path)
if image_read is None:
print('Bad image:{}'.format(image_path))
continue
# convert image to png
image_png = image.replace('.jpg', '.png')
cv2.imwrite(os.path.join(afw_data_path, image_png), image_read)
image_data = {}
image_data['filename'] = os.path.join(container_root_path, "data/afw", image_png)
image_data['class'] = 'image'
annotations = {}
annotations['tool-version'] = '1.0'
annotations['version'] = 'v1'
annotations['class'] = 'FiducialPoints'
keypoint_file = image.split(".")[-2] + ".pts"
image_keypoints = get_keypoints_from_file(os.path.join(afw_data_path, keypoint_file))
if key_points == 80:
for num, keypoint in enumerate(image_keypoints):
annotations["P{}x".format(num + 1)] = keypoint[0]
annotations["P{}y".format(num + 1)] = keypoint[1]
# fill in dummy keypoints for keypoints 69 to 80
for num in range(69, 81, 1):
annotations["P{}x".format(num)] = image_keypoints[0][0]
annotations["P{}y".format(num)] = image_keypoints[0][1]
annotations["P{}occluded".format(num)] = True
elif key_points == 10:
key_id = 1
for num, keypoint in enumerate(image_keypoints):
# change to 10-points dataset:
if (num + 1) in [1, 9, 17, 20, 25, 39, 45, 34, 49, 55]:
annotations["P{}x".format(key_id)] = keypoint[0]
annotations["P{}y".format(key_id)] = keypoint[1]
key_id += 1
else:
raise ValueError("This script only generates 10 & 80 keypoints dataset.")
image_data['annotations'] = [annotations]
sloth_data.append(image_data)
# save json
with open(output_json_path, "w") as config_file:
json.dump(sloth_data, config_file, indent=4)
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(
description='Transform dataset for FPENet tutorial')
parser.add_argument(
"--afw_data_path",
type=str,
required=True,
help="Input directory to AFW dataset imnages and ground truth keypoints."
)
parser.add_argument(
"--container_root_path",
type=str,
required=True,
help="Path of image folder with respect to the container"
)
parser.add_argument(
"--output_json_path",
type=str,
required=True,
help="Output json file path to save to."
)
parser.add_argument(
"--afw_image_save_path",
type=str,
required=True,
help="Image path to use in jsons."
)
parser.add_argument(
"--num_key_points",
type=int,
default=80,
help="Number of key points."
)
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args()
convert_dataset(args.container_root_path, args.afw_data_path, args.output_json_path, args.afw_image_save_path, args.num_key_points)
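# Example invocation (paths are illustrative assumptions):
#   python data_utils.py --afw_data_path /data/afw --container_root_path /workspace/tao-experiments \
#       --output_json_path /data/afw/afw.json --afw_image_save_path /workspace/tao-experiments/data/afw
# Writes a Sloth-format JSON with 80 (default) or 10 fiducial keypoints per image.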
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/fpenet/data_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import h5py
import cv2
import os
import csv
def build_command_line_parser(parser=None):
"""Build command line parser for dataset_convert.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
prog='process_cohface',
description='Convert COHFACE into heartratenet api compatible dataset',
)
parser.add_argument('-i', '--input_path',
type=str,
required=True,
help='Input path for COHFACE, this is the root of the dataset')
parser.add_argument('-o', '--output_path',
type=str,
required=True,
                        help='Output path for the converted COHFACE dataset, this is the root of the output')
parser.add_argument('-start_subject_id', '--start_subject_id',
type=int,
required=True,
help='Start subject id for COHFACE')
parser.add_argument('-end_subject_id', '--end_subject_id',
type=int,
required=True,
help='End subject id for COHFACE')
parser.add_argument('-b', '--breathing_rate',
action='store_true',
default=False,
help='If true, processes the dataset for breathing rate, else exports heart rate')
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments.
Args:
args (list): List of strings used as command line arguments.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args_parsed = parser.parse_args()
return args_parsed
def get_timestamp_from_video(video_filename):
"""get video timestamp.
Args:
video_filename (str): video filename
Returns:
timestamps(list of float): a list of timestamps for each frame in video
"""
cap = cv2.VideoCapture(video_filename)
# fps = cap.get(cv2.CAP_PROP_FPS)
timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC) / 1000] # convert MSEC to SEC
# calc_timestamps = [0.0]
while (cap.isOpened()):
frame_exists, curr_frame = cap.read()
if frame_exists:
timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000)
else:
break
cap.release()
return timestamps
def process_subject(path, output, breathing=False):
"""convert COHFACE data format for subject.
Args:
path (str): input dataset path
output (str): output dataset path after format conversion
breathing (bool): whether get heartrate signal or breathrate signal
Returns:
None
"""
video_file = os.path.join(path, 'data.avi')
vidcap = cv2.VideoCapture(video_file)
fps = vidcap.get(cv2.CAP_PROP_FPS)
timestamps = [vidcap.get(cv2.CAP_PROP_POS_MSEC) / 1000] # convert MSEC to SEC
print(f'Processing {video_file}, fps {fps}')
subject_file = h5py.File(os.path.join(path, 'data.hdf5'), 'r')
# Processing video
count = 0
while vidcap.isOpened():
success, image = vidcap.read()
if success:
cv2.imwrite(os.path.join(output, 'images', format(count, '04d') + '.bmp'), image)
count += 1
timestamps.append(vidcap.get(cv2.CAP_PROP_POS_MSEC) / 1000)
else:
break
vidcap.release()
# Processing image time stamps
image_file = os.path.join(output, 'image_timestamps.csv')
with open(image_file, 'w') as file:
header = ['ID', 'Time']
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for frame, time in zip(range(count), timestamps):
writer.writerow({'ID': frame,
'Time': time})
pulse_time = subject_file['time']
if breathing:
pulse = subject_file['respiration']
else:
pulse = subject_file['pulse']
# Processing pulse
pulse_file = os.path.join(output, 'ground_truth.csv')
with open(pulse_file, 'w') as file:
header = ['Time', 'PulseWaveform']
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for time, pulse_val in zip(pulse_time, pulse):
writer.writerow({'Time': time,
'PulseWaveform': pulse_val})
def main(cl_args=None):
"""process cohface.
Args:
args(list): list of arguments to be parsed if called from another module.
"""
args_parsed = parse_command_line(cl_args)
input_path = args_parsed.input_path
output_path = args_parsed.output_path
start_subject_id = args_parsed.start_subject_id
end_subject_id = args_parsed.end_subject_id
breathing_flag = args_parsed.breathing_rate
session_number = 4
for sub in range(start_subject_id, end_subject_id):
for fol in range(session_number):
input_dir = os.path.join(input_path, str(sub), str(fol))
output_dir = os.path.join(output_path, str(sub), str(fol))
os.makedirs(os.path.join(output_dir, 'images'))
process_subject(input_dir, output_dir, breathing=breathing_flag)
if __name__ == '__main__':
main()
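# Example invocation (paths and the subject range are illustrative assumptions; -b switches
# the exported signal from pulse to respiration):
#   python process_cohface.py -i /data/cohface -o /data/cohface_processed \
#       -start_subject_id 1 -end_subject_id 25
# Each session's video is dumped to .bmp frames plus image_timestamps.csv and ground_truth.csv.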
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/heartratenet/process_cohface.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Converts Retail Product Checkout (https://www.kaggle.com/datasets/diyer22/retail-product-checkout-dataset) dataset to classification dataset. Ready for MLRecogNet training.
"""
import os, zipfile
import glob
import cv2
from pycocotools.coco import COCO
from tqdm import tqdm
import numpy as np
import shutil
def create_reference_set(dataset_dir, ref_dir, ref_num = 100):
os.makedirs(ref_dir, exist_ok=True)
classes = os.listdir(dataset_dir)
print(f"Creating reference set from {dataset_dir}...")
for class_name in tqdm(classes):
samples = os.listdir(os.path.join(dataset_dir, class_name))
if not os.path.exists(os.path.join(ref_dir, class_name)):
os.makedirs(os.path.join(ref_dir, class_name))
if len(samples) >= ref_num:
ref_samples = np.random.choice(samples, ref_num, replace=False)
else:
print(f"Warning: {class_name} has only {len(samples)} samples. Copying all samples to reference set.")
ref_samples = samples
for sample in ref_samples:
try:
shutil.copy(os.path.join(dataset_dir, class_name, sample), os.path.join(ref_dir, class_name, sample))
            except OSError:
                # skip samples that fail to copy
                pass
print("Done!")
def crop_images(file_path, bbox, class_id, output_dir):
file_name = os.path.basename(file_path)
class_folder = os.path.join(output_dir, class_id)
if not os.path.exists(class_folder):
os.mkdir(class_folder)
image_count = len(glob.glob( os.path.join(class_folder, file_name+"*.jpg")))
new_file_name = os.path.join(class_folder, file_name + f"_{image_count+1}.jpg")
if os.path.exists(new_file_name):
# skip if file already exists
return
# start processing image
    # COCO-style bbox is [x, y, width, height]; here x2 and y2 hold the width and height
    x1, y1, x2, y2 = bbox
    # skip if the bbox is too small
if x2 < 120 or y2 < 150:
return
try:
image = cv2.imread(file_path)
h, w, _ = image.shape
    except (AttributeError, cv2.error):
        # cv2.imread returns None for unreadable files, so .shape raises AttributeError
        print(f"{file_path} is not a valid image file")
        return
# give 14% margin to the bounding box
cropped_image = image[max(int(y1 - 0.07*y2), 0 ):min(int(y1+1.07*y2), h), \
max(int(x1 - 0.07*x2), 0 ):min(int(x1+1.07*x2), w)]
# resize to 256x256 for faster processing and training
resized_cropped_image = cv2.resize(cropped_image, (256, 256), cv2.INTER_AREA)
cv2.imwrite(os.path.join(class_folder, new_file_name), resized_cropped_image)
# load dataset
data_root_dir = os.path.join(os.environ['DATA_DIR'],"metric_learning_recognition")
path_to_zip_file = os.path.join(data_root_dir,"retail-product-checkout-dataset.zip")
directory_to_extract_to = os.path.join(data_root_dir, "retail-product-checkout-dataset")
processed_classification_dir = os.path.join(data_root_dir,"retail-product-checkout-dataset_classification_demo")
## unzip dataset
if not os.path.exists(processed_classification_dir):
os.makedirs(processed_classification_dir)
print("Unzipping dataset...")
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(directory_to_extract_to)
directory_to_extract_to = os.path.join(directory_to_extract_to, "retail_product_checkout")
for dataset in ["train", "val", "test"]:
dataset_dir = os.path.join(directory_to_extract_to, dataset+"2019")
annotation_file = os.path.join(directory_to_extract_to, "instances_"+dataset+"2019.json")
output_dir = os.path.join(processed_classification_dir, dataset)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
## load coco dataset
print(f"Loading COCO {dataset} dataset...")
coco_label = COCO(annotation_file)
# crop images to classification data
for img_object in tqdm(coco_label.dataset["images"]):
image_path = os.path.join(dataset_dir, img_object["file_name"])
# remove top view images
if "camera2" in image_path:
continue
image_id = img_object["id"]
annotation_ids = coco_label.getAnnIds(imgIds=image_id)
for annot in coco_label.loadAnns(annotation_ids):
bbox = annot["bbox"]
class_id = annot["category_id"]
category = coco_label.loadCats(class_id)[0]
class_name = category["supercategory"] + "_" + category["name"]
crop_images(image_path, bbox, class_name, output_dir)
# extract a reference set from training set
## fixed random seed for reproducibility
np.random.seed(0)
create_reference_set(
os.path.join(processed_classification_dir, "train"), \
os.path.join(processed_classification_dir, "reference"), \
ref_num=100)
# split out unknown classes
# select 20% classes as unknown classes
class_list = os.listdir(os.path.join(processed_classification_dir, "train"))
total_class_num = len(class_list)
unknown_classes = np.random.choice(class_list, int(total_class_num*0.2), replace=False)
known_classes = [c for c in class_list if c not in unknown_classes]
known_classes_dir = os.path.join(processed_classification_dir, "known_classes")
unknown_classes_dir = os.path.join(processed_classification_dir, "unknown_classes")
for dataset in ["train", "val", "test", "reference"]:
known_classes_dataset_dir = os.path.join(known_classes_dir, dataset)
unknown_classes_dataset_dir = os.path.join(unknown_classes_dir, dataset)
if not os.path.exists(known_classes_dataset_dir):
os.makedirs(known_classes_dataset_dir)
if not os.path.exists(unknown_classes_dataset_dir):
os.makedirs(unknown_classes_dataset_dir)
for class_name in tqdm(known_classes):
class_dir = os.path.join(processed_classification_dir, dataset, class_name)
os.rename(class_dir, os.path.join(known_classes_dataset_dir, class_name))
for class_name in tqdm(unknown_classes):
class_dir = os.path.join(processed_classification_dir, dataset, class_name)
os.rename(class_dir, os.path.join(unknown_classes_dataset_dir, class_name))
# remove old folders
for dataset in ["train", "val", "test", "reference"]:
shutil.rmtree(os.path.join(processed_classification_dir, dataset))
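# Note: this script expects $DATA_DIR/metric_learning_recognition/retail-product-checkout-dataset.zip
# (the Kaggle download) and produces known_classes/ and unknown_classes/ splits, each with
# train/val/test/reference folders of 256x256 crops.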
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/metric_learning_recognition/process_retail_product_checkout_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import cv2
import csv
import ujson
classes = set([])
def read_kitti(prefix, label_file):
"Function wrapper to read kitti format labels txt file."
global classes
full_label_path = os.path.join(prefix, label_file)
if not full_label_path.endswith(".txt"):
return
if not os.path.exists(full_label_path):
raise ValueError("Labelfile : {} does not exist".format(full_label_path))
if os.path.isdir(full_label_path):
return
dict_list = []
image_name = full_label_path.replace("/labels", "/images").replace(".txt", ".jpg")
if not os.path.exists(image_name):
raise ValueError("Image : {} does not exist".format(image_name))
img = cv2.imread(image_name, 0)
height, width = img.shape[:2]
with open(full_label_path, 'r') as lf:
for row in csv.reader(lf, delimiter=' '):
classes.add(row[0])
dict_list.append({"class_name": row[0],
"file_name": label_file.replace(".txt", ".jpg"),
"height": height,
"width": width,
"bbox": [float(row[4]), float(row[5]), float(row[6]) - float(row[4]), float(row[7]) - float(row[5])]})
if (dict_list == []):
dict_list = [{"file_name": label_file.replace(".txt", ".jpg"),
"height": height,
"width": width}]
return dict_list
def construct_coco_json(labels_folder):
image_id = 0
annot_ctr = 0
labels = []
for file in os.listdir(labels_folder):
label = read_kitti(labels_folder, file)
labels.append(label)
categories = []
class_to_id_mapping = {}
for idx, object_class in enumerate(classes):
class_to_id_mapping[object_class] = idx + 1
categories.append({"supercategory": object_class, "id": idx + 1, "name": object_class})
coco_json = {"images": [], "annotations": [], "categories": categories}
for label in labels:
if not (label and len(label)):
continue
coco_json["images"].append({"file_name": label[0]["file_name"], "height": label[0]["height"], "width": label[0]["width"], "id": image_id})
for instance in label:
if ("bbox" in instance.keys()):
coco_json["annotations"].append({"bbox": instance["bbox"],
"image_id": image_id,
"id": annot_ctr,
"category_id": class_to_id_mapping[instance["class_name"]],
"bbox_mode": 1,
"segmentation": [],
"iscrowd": 0,
"area": float(instance["bbox"][2] * instance["bbox"][3])})
annot_ctr += 1
image_id += 1
return coco_json
label_folder = sys.argv[1]
coco_json = construct_coco_json(label_folder)
current_str = ujson.dumps(coco_json, indent=4)
with open(sys.argv[2] + "/annotations.json", "w") as json_out_file:
json_out_file.write(current_str)
label_map_extension = sys.argv[3]
with open(f"{sys.argv[2]}/label_map.{label_map_extension}", "w") as label_map_file:
for idx, class_name in enumerate(classes):
if label_map_extension == "yaml":
label_map_file.write(f"{idx+1}: '{class_name}'\n")
else:
label_map_file.write(f"{class_name}\n")
label_map_file.flush()
print(len(classes))
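# Example invocation (paths are illustrative assumptions):
#   python kitti_to_coco.py /data/training/labels /data/training yaml
# Writes annotations.json plus label_map.yaml (or a plain-text label map) to the output
# directory and prints the number of classes found.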
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/kitti/kitti_to_coco.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calibration of KITTI dataset."""
import numpy as np
def get_calib_from_file(calib_file):
"""Get calibration from file."""
with open(calib_file) as f:
lines = f.readlines()
obj = lines[2].strip().split(' ')[1:]
P2 = np.array(obj, dtype=np.float32)
obj = lines[3].strip().split(' ')[1:]
P3 = np.array(obj, dtype=np.float32)
obj = lines[4].strip().split(' ')[1:]
R0 = np.array(obj, dtype=np.float32)
obj = lines[5].strip().split(' ')[1:]
Tr_velo_to_cam = np.array(obj, dtype=np.float32)
return {'P2': P2.reshape(3, 4),
'P3': P3.reshape(3, 4),
'R0': R0.reshape(3, 3),
'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
class Calibration(object):
"""Calibration class."""
def __init__(self, calib_file):
"""Initialize."""
if not isinstance(calib_file, dict):
calib = get_calib_from_file(calib_file)
else:
calib = calib_file
self.P2 = calib['P2'] # 3 x 4
self.R0 = calib['R0'] # 3 x 3
self.V2C = calib['Tr_velo2cam'] # 3 x 4
# Camera intrinsics and extrinsics
self.cu = self.P2[0, 2]
self.cv = self.P2[1, 2]
self.fu = self.P2[0, 0]
self.fv = self.P2[1, 1]
self.tx = self.P2[0, 3] / (-self.fu)
self.ty = self.P2[1, 3] / (-self.fv)
def cart_to_hom(self, pts):
"""
:param pts: (N, 3 or 2)
:return pts_hom: (N, 4 or 3)
"""
pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
return pts_hom
def rect_to_lidar(self, pts_rect):
"""
        :param pts_rect: (N, 3)
        :return pts_lidar: (N, 3)
"""
pts_rect_hom = self.cart_to_hom(pts_rect) # (N, 4)
R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)
R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
R0_ext[3, 3] = 1
V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
V2C_ext[3, 3] = 1
pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
return pts_lidar[:, 0:3]
def lidar_to_rect(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_lidar_hom = self.cart_to_hom(pts_lidar)
pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
# pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
return pts_rect
def rect_to_img(self, pts_rect):
"""
:param pts_rect: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect_hom = self.cart_to_hom(pts_rect)
pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)
pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord
return pts_img, pts_rect_depth
def lidar_to_img(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect = self.lidar_to_rect(pts_lidar)
pts_img, pts_depth = self.rect_to_img(pts_rect)
return pts_img, pts_depth
def img_to_rect(self, u, v, depth_rect):
"""
:param u: (N)
:param v: (N)
:param depth_rect: (N)
:return:
"""
x = ((u - self.cu) * depth_rect) / self.fu + self.tx
y = ((v - self.cv) * depth_rect) / self.fv + self.ty
pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
return pts_rect
def corners3d_to_img_boxes(self, corners3d):
"""
:param corners3d: (N, 8, 3) corners in rect coordinate
:return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
:return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
"""
sample_num = corners3d.shape[0]
corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 4)
img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)
x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
return boxes, boxes_corner
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/pointpillars/calibration_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
def drop_class(label_dir, classes):
"""drop label by class names."""
labels = os.listdir(label_dir)
labels = [os.path.join(label_dir, x) for x in labels]
for gt in labels:
print("Processing ", gt)
with open(gt) as f:
lines = f.readlines()
lines_ret = []
for line in lines:
ls = line.strip()
line = ls.split()
if line[0] in classes:
print("Dropping ", line[0])
continue
else:
lines_ret.append(ls)
with open(gt, "w") as fo:
out = '\n'.join(lines_ret)
fo.write(out)
if __name__ == "__main__":
drop_class(sys.argv[1], sys.argv[2].split(','))
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/pointpillars/drop_class.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import numpy as np
from object3d_kitti import get_objects_from_label
from calibration_kitti import Calibration
def parse_args():
parser = argparse.ArgumentParser("Convert camera label to LiDAR label.")
parser.add_argument(
"-l", "--label_dir",
type=str, required=True,
help="Camera label directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR label directory"
)
return parser.parse_args()
def generate_lidar_labels(label_dir, calib_dir, output_dir):
"""Generate LiDAR labels from KITTI Camera labels."""
for lab in os.listdir(label_dir):
lab_file = os.path.join(label_dir, lab)
obj_list = get_objects_from_label(lab_file)
calib_file = os.path.join(calib_dir, lab)
calib = Calibration(calib_file)
loc = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
loc_lidar = calib.rect_to_lidar(loc)
# update obj3d.loc
with open(os.path.join(output_dir, lab), "w") as lf:
for idx, lc in enumerate(loc_lidar):
# bottom center to 3D center
obj_list[idx].loc = (lc + np.array([0., 0., obj_list[idx].h / 2.]))
# rotation_y to rotation_z
obj_list[idx].ry = -np.pi / 2. - obj_list[idx].ry
lf.write(obj_list[idx].to_kitti_format())
lf.write('\n')
if __name__ == "__main__":
args = parse_args()
generate_lidar_labels(args.label_dir, args.calib_dir, args.output_dir)
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/pointpillars/gen_lidar_labels.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
def split(list_file, lidar, label, output_lidar, output_label):
"""train/val split of the KITTI dataset."""
with open(list_file) as lf:
file_names = lf.readlines()
file_names = [f.strip() for f in file_names]
for li in os.listdir(lidar):
if li[:-4] in file_names:
os.rename(os.path.join(lidar, li), os.path.join(output_lidar, li))
for la in os.listdir(label):
if la[:-4] in file_names:
os.rename(os.path.join(label, la), os.path.join(output_label, la))
if __name__ == "__main__":
split(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/pointpillars/kitti_split.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def get_objects_from_label(label_file):
"""Get objects from label."""
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [Object3d(line) for line in lines]
return objects
def cls_type_to_id(cls_type):
"""Convert class type to ID."""
type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
if cls_type not in type_to_id.keys():
return -1
return type_to_id[cls_type]
class Object3d(object):
"""Object3d class."""
def __init__(self, line):
"""Initialize."""
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.cls_id = cls_type_to_id(self.cls_type)
self.truncation = float(label[1])
self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[8])
self.w = float(label[9])
self.l = float(label[10]) # noqa: E741
self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
self.dis_to_cam = np.linalg.norm(self.loc)
self.ry = float(label[14])
self.score = float(label[15]) if len(label) == 16 else -1.0
self.level_str = None
self.level = self.get_kitti_obj_level()
def get_kitti_obj_level(self):
"""Get KITTI object difficult level."""
height = float(self.box2d[3]) - float(self.box2d[1]) + 1
if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
self.level_str = 'Easy'
return 0 # Easy
if height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
self.level_str = 'Moderate'
return 1 # Moderate
if height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
self.level_str = 'Hard'
return 2 # Hard
self.level_str = 'UnKnown'
return -1
def generate_corners3d(self):
"""
Generate the corners3d representation for this object.
:return corners_3d: (8, 3) corners of box3d in camera coordinates
"""
l, h, w = self.l, self.h, self.w
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
[0, 1, 0],
[-np.sin(self.ry), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.loc
return corners3d
def to_str(self):
"""Convert to string."""
print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
% (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l, self.loc, self.ry)
return print_str
def to_kitti_format(self):
"""Convert to KITTI format."""
kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
% (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2], self.ry)
return kitti_str
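# A minimal usage sketch (the label file name below is hypothetical):
#   objects = get_objects_from_label("000000.txt")
#   for obj in objects:
#       print(obj.cls_type, obj.level_str, obj.to_kitti_format())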
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/pointpillars/object3d_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import numpy as np
from skimage import io
from calibration_kitti import Calibration
def parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description="Limit LiDAR points to FOV range.")
parser.add_argument(
"-p", "--points_dir",
type=str, required=True,
help="LIDAR points directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR points directory"
)
parser.add_argument(
"-i",
"--image_dir",
type=str, required=True,
help="image directory"
)
return parser.parse_args()
def get_fov_flag(pts_rect, img_shape, calib):
"""Return a boolean mask of points that project inside the image and have non-negative depth."""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def generate_lidar_points(points_dir, calib_dir, output_dir, image_dir):
"""Limit LiDAR points to FOV range."""
for pts in os.listdir(points_dir):
pts_file = os.path.join(points_dir, pts)
points = np.fromfile(pts_file, dtype=np.float32).reshape(-1, 4)
calib_file = os.path.join(calib_dir, pts[:-4] + ".txt")
calib = Calibration(calib_file)
pts_rect = calib.lidar_to_rect(points[:, 0:3])
img_file = os.path.join(image_dir, pts[:-4] + ".png")
img_shape = np.array(io.imread(img_file).shape[:2], dtype=np.int32)
fov_flag = get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
points.tofile(os.path.join(output_dir, pts))
# double check
points_cp = np.fromfile(os.path.join(output_dir, pts), dtype=np.float32).reshape(-1, 4)
assert np.equal(points, points_cp).all()
if __name__ == "__main__":
args = parse_args()
generate_lidar_points(
args.points_dir, args.calib_dir,
args.output_dir, args.image_dir
)
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/pointpillars/gen_lidar_points.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prepare train/val dataset for Unet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
from PIL import Image, ImageSequence
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for UNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to ISBI Tiff Files"
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to processes images from ISBI Tiff files."
)
return parser.parse_args(args)
def save_arrays_as_images(arrays, im_dir):
"""Utility function to save images from numpy arrays to a directory."""
for idx, arr in enumerate(arrays):
img_name = os.path.join(im_dir, "image_{}.png".format(idx))
cv2.imwrite(img_name, arr)
def load_multipage_tiff(path):
"""Load tiff images containing many images in the channel dimension"""
return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])
def check_and_create(d):
"""Utility function to create a dir if not present"""
if not os.path.isdir(d):
os.makedirs(d)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
train_images_tif = os.path.join(args.input_dir, "train-volume.tif")
train_masks_tif = os.path.join(args.input_dir, "train-labels.tif")
test_images_tif = os.path.join(args.input_dir, "test-volume.tif")
output_images_dir = os.path.join(args.output_dir, "images")
output_masks_dir = os.path.join(args.output_dir, "masks")
# Creating the images dir for train, test, val
train_images_dir = os.path.join(output_images_dir, "train")
val_images_dir = os.path.join(output_images_dir, "val")
test_images_dir = os.path.join(output_images_dir, "test")
train_masks_dir = os.path.join(output_masks_dir, "train")
val_masks_dir = os.path.join(output_masks_dir, "val")
check_and_create(train_images_dir)
check_and_create(val_images_dir)
check_and_create(test_images_dir)
check_and_create(train_masks_dir)
check_and_create(val_masks_dir)
train_np_arrays_images = load_multipage_tiff(train_images_tif)
train_np_arrays_masks = load_multipage_tiff(train_masks_tif)
test_np_arrays_images = load_multipage_tiff(test_images_tif)
# Splitting the train numpy arrays into train and val
train_np_arrays_images_final = train_np_arrays_images[:20, :, :]
train_np_arrays_masks_final = train_np_arrays_masks[:20, :, :]
val_np_arrays_images_final = train_np_arrays_images[20:, :, :]
val_np_arrays_masks_final = train_np_arrays_masks[20:, :, :]
# Saving the train arrays as images
save_arrays_as_images(train_np_arrays_images_final, train_images_dir)
save_arrays_as_images(train_np_arrays_masks_final, train_masks_dir)
# Saving the val arrays as images
save_arrays_as_images(val_np_arrays_images_final, val_images_dir)
save_arrays_as_images(val_np_arrays_masks_final, val_masks_dir)
# Saving the test arrays as images
save_arrays_as_images(test_np_arrays_images, test_images_dir)
print("Prepared data successfully !")
if __name__ == "__main__":
main()
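# Note: the 20/10 train/val split above assumes the standard ISBI 2012 training
# volume with 30 slices; if your volume contains a different number of slices,
# adjust the split indices accordingly.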
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/unet/prepare_data_isbi.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import re
import random
import sys
def sample_dataset(input_dir, output_dir, n_samples, use_ids=None):
"""Select a subset of images fom input_dir and move them to output_dir.
Args:
input_dir (str): Input Folder Path of the train images.
output_dir (str): Output Folder Path of the test images.
n_samples (int): Number of samples to use.
use_ids(list int): List of IDs to grab from test and query folder.
Returns:
IDs used for sampling
"""
img_paths = glob.glob(os.path.join(input_dir, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
id_to_img = {}
# Grab images with matching ids
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid not in id_to_img:
id_to_img[pid] = []
id_to_img[pid].append(img_path)
# Create directory
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
command = "rm -r " + output_dir
os.system(command)
os.makedirs(output_dir)
assert id_to_img, "Dataset size cannot be 0."
sampled_id_to_img = dict(random.sample(list(id_to_img.items()), n_samples))
for key, img_paths in sampled_id_to_img.items():
for img_path in img_paths:
command = "cp " + img_path + " " + output_dir
os.system(command)
# Use same ids for test and query
if use_ids:
# Create query dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
command = "rm -r " + output_dir
os.system(command)
os.makedirs(output_dir)
# Find images in test with same id
img_paths = glob.glob(os.path.join(input_dir, '*.jpg'))
for id in use_ids:
pattern = re.compile(r'([-\d]+)_c(\d)')
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if id == pid:
print(img_path)
command = "cp " + img_path + " " + output_dir
os.system(command)
return sampled_id_to_img.keys()
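# The regex above assumes Market-1501 style file names such as
# "0002_c1s1_000451_03.jpg", where "0002" is the person ID and "c1" the camera
# index; file names in other formats would need a different pattern.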
# Number of samples
n_samples = int(sys.argv[1])
data_dir = os.path.join(os.environ["DATA_DIR"], "market1501")
# Create train dataset
train_input_dir = os.path.join(data_dir, "bounding_box_train")
train_output_dir = os.path.join(data_dir, "sample_train")
sample_dataset(train_input_dir, train_output_dir, n_samples)
# Create test dataset
test_input_dir = os.path.join(data_dir, "bounding_box_test")
test_output_dir = os.path.join(data_dir, "sample_test")
ids = sample_dataset(test_input_dir, test_output_dir, n_samples)
# Create query dataset
query_input_dir = os.path.join(data_dir, "query")
query_output_dir = os.path.join(data_dir, "sample_query")
sample_dataset(query_input_dir, query_output_dir, n_samples, ids)
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/re_identification/obtain_subset_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import numpy as np
data_dir = os.path.join(os.environ["DATA_DIR"], "kinetics")
# front_raises: 134
# pull_ups: 255
# clean_and_jerk: 59
# presenting_weather_forecast: 254
# deadlifting: 88
selected_actions = {
134: 0,
255: 1,
59: 2,
254: 3,
88: 4
}
def select_actions(selected_actions, data_dir, split_name):
"""Select a subset of actions and their corresponding labels.
Args:
selected_actions (dict): Map from selected class IDs to new class IDs.
data_dir (str): Path to the directory of data arrays (.npy) and labels (.pkl).
split_name (str): Name of the split to be processed, e.g., "train" and "val".
Returns:
No explicit returns
"""
data_path = os.path.join(data_dir, f"{split_name}_data.npy")
label_path = os.path.join(data_dir, f"{split_name}_label.pkl")
data_array = np.load(file=data_path)
with open(label_path, "rb") as label_file:
labels = pickle.load(label_file)
assert (len(labels) == 2)
assert (data_array.shape[0] == len(labels[0]))
assert (len(labels[0]) == len(labels[1]))
print(f"No. total samples for {split_name}: {data_array.shape[0]}")
selected_indices = []
for i in range(data_array.shape[0]):
if labels[1][i] in selected_actions.keys():
selected_indices.append(i)
data_array = data_array[selected_indices, :, :, :, :]
selected_sample_names = [labels[0][x] for x in selected_indices]
selected_labels = [selected_actions[labels[1][x]] for x in selected_indices]
labels = (selected_sample_names, selected_labels)
print(f"No. selected samples for {split_name}: {data_array.shape[0]}")
np.save(file=data_path, arr=data_array, allow_pickle=False)
with open(label_path, "wb") as label_file:
pickle.dump(labels, label_file, protocol=4)
select_actions(selected_actions, data_dir, "train")
select_actions(selected_actions, data_dir, "val")
| tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/pose_classification/select_subset_actions.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Clean the labels to alphanumeric, case-insensitive (lower case). Filter out labels longer than 25 characters.
import os
import re
import sys
from tqdm import tqdm
def preprocess_label(gt_file, filtered_file):
"""Clean and filter ground-truth labels, then write them to filtered_file."""
gt_list = open(gt_file, "r").readlines()
filtered_list = []
character_list = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
for label_line in tqdm(gt_list):
try:
path, label = label_line.strip().split()
except Exception:
continue
path = path[:-1]
label = label.strip("\"")
if re.search(f"[^{character_list}]", label):
continue
else:
if len(label) <= 25:
label = label.lower() # ignore the case
filtered_list.append(f"{path}\t{label}\n")
with open(filtered_file, "w") as f:
f.writelines(filtered_list)
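# Based on the parsing above, each ground-truth line is assumed to look roughly
# like: <image_path>, "<label>"  -- a path with a trailing comma followed by a
# double-quoted label; this format is inferred from the code, not documented.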
def main():
preprocess_label(sys.argv[1], sys.argv[2])
character_list = "0123456789abcdefghijklmnopqrstuvwxyz"
with open(os.path.join(os.getenv("DATA_DIR"), "character_list"), "w") as f:
for ch in character_list:
f.write(f"{ch}\n")
if __name__ == "__main__":
main() | tao_tutorials-main | notebooks/tao_api_starter_kit/client/dataset_prepare/ocrnet/preprocess_label.py |
#!/usr/bin/python
from __future__ import division
import numpy as np
import xgboost as xgb
# labels need to be in the range 0 to num_class - 1
data = np.loadtxt('./dermatology.data', delimiter=',',
converters={33: lambda x:int(x == '?'), 34: lambda x:int(x) - 1})
sz = data.shape
train = data[:int(sz[0] * 0.7), :]
test = data[int(sz[0] * 0.7):, :]
train_X = train[:, :33]
train_Y = train[:, 34]
test_X = test[:, :33]
test_Y = test[:, 34]
xg_train = xgb.DMatrix(train_X, label=train_Y)
xg_test = xgb.DMatrix(test_X, label=test_Y)
# setup parameters for xgboost
param = {}
# use softmax multi-class classification
param['objective'] = 'multi:softmax'
# scale weight of positive examples
param['eta'] = 0.1
param['max_depth'] = 6
param['nthread'] = 4
param['num_class'] = 6
watchlist = [(xg_train, 'train'), (xg_test, 'test')]
num_round = 5
bst = xgb.train(param, xg_train, num_round, watchlist)
# get prediction
pred = bst.predict(xg_test)
error_rate = np.sum(pred != test_Y) / test_Y.shape[0]
print('Test error using softmax = {}'.format(error_rate))
# do the same thing again, but output probabilities
param['objective'] = 'multi:softprob'
bst = xgb.train(param, xg_train, num_round, watchlist)
# Note: this convention has been changed since xgboost-unity
# get prediction, this is in 1D array, need reshape to (ndata, nclass)
pred_prob = bst.predict(xg_test).reshape(test_Y.shape[0], 6)
pred_label = np.argmax(pred_prob, axis=1)
error_rate = np.sum(pred_label != test_Y) / test_Y.shape[0]
print('Test error using softprob = {}'.format(error_rate))
| spark-xgboost-nv-release_1.4.0 | demo/multiclass_classification/train.py |
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
from dask import array as da
import xgboost as xgb
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
import cupy as cp
import argparse
def using_dask_matrix(client: Client, X, y):
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = xgb.dask.train(client,
{'verbosity': 2,
# Golden line for GPU training
'tree_method': 'gpu_hist'},
dtrain,
num_boost_round=4, evals=[(dtrain, 'train')])
bst = output['booster']
history = output['history']
# you can pass output directly into `predict` too.
prediction = xgb.dask.predict(client, bst, dtrain)
print('Evaluation history:', history)
return prediction
def using_quantile_device_dmatrix(client: Client, X, y):
'''`DaskDeviceQuantileDMatrix` is a data type specialized for the `gpu_hist` tree
method that reduces memory overhead. When training on a GPU pipeline, it's
preferred over `DaskDMatrix`.
.. versionadded:: 1.2.0
'''
# Input must be on GPU for `DaskDeviceQuantileDMatrix`.
X = X.map_blocks(cp.array)
y = y.map_blocks(cp.array)
# `DaskDeviceQuantileDMatrix` is used instead of `DaskDMatrix`, be careful
# that it can not be used for anything else than training.
dtrain = dxgb.DaskDeviceQuantileDMatrix(client, X, y)
output = xgb.dask.train(client,
{'verbosity': 2,
'tree_method': 'gpu_hist'},
dtrain,
num_boost_round=4)
prediction = xgb.dask.predict(client, output, X)
return prediction
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--ddqdm', choices=[0, 1], type=int, default=1,
help='''Whether we should use `DaskDeviceQuantileDMatrix`.''')
args = parser.parse_args()
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker
# process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
with Client(cluster) as client:
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=100)
y = da.random.random(size=(m, ), chunks=100)
if args.ddqdm == 1:
print('Using DaskDeviceQuantileDMatrix')
from_ddqdm = using_quantile_device_dmatrix(client, X, y)
else:
print('Using DMatrix')
from_dmatrix = using_dask_matrix(client, X, y)
| spark-xgboost-nv-release_1.4.0 | demo/dask/gpu_training.py |
import xgboost as xgb
from xgboost.dask import DaskDMatrix
from dask.distributed import Client
from dask.distributed import LocalCluster
from dask import array as da
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=100)
y = da.random.random(size=(m, ), chunks=100)
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = xgb.dask.train(client,
{'verbosity': 1,
'tree_method': 'hist'},
dtrain,
num_boost_round=4, evals=[(dtrain, 'train')])
bst = output['booster']
history = output['history']
# you can pass output directly into `predict` too.
prediction = xgb.dask.predict(client, bst, dtrain)
print('Evaluation history:', history)
return prediction
if __name__ == '__main__':
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
| spark-xgboost-nv-release_1.4.0 | demo/dask/cpu_training.py |
'''Dask interface demo:
Use scikit-learn regressor interface with GPU histogram tree method.'''
from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
from dask import array as da
import xgboost
def main(client):
# generate some random data for demonstration
n = 100
m = 1000000
partition_size = 10000
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = xgboost.dask.DaskXGBRegressor(verbosity=1)
regressor.set_params(tree_method='gpu_hist')
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print('Evaluation history:', history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == '__main__':
# With dask cuda, one can scale up XGBoost to arbitrary GPU clusters.
# `LocalCUDACluster` used here is only for demonstration purpose.
with LocalCUDACluster() as cluster:
with Client(cluster) as client:
main(client)
| spark-xgboost-nv-release_1.4.0 | demo/dask/sklearn_gpu_training.py |
'''Dask interface demo:
Use scikit-learn regressor interface with CPU histogram tree method.'''
from dask.distributed import Client
from dask.distributed import LocalCluster
from dask import array as da
import xgboost
def main(client):
# generate some random data for demonstration
n = 100
m = 10000
partition_size = 100
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = xgboost.dask.DaskXGBRegressor(verbosity=1, n_estimators=2)
regressor.set_params(tree_method='hist')
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print('Evaluation history:', history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == '__main__':
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
| spark-xgboost-nv-release_1.4.0 | demo/dask/sklearn_cpu_training.py |
#!/usr/bin/python
import sys
import random
if len(sys.argv) < 2:
print ('Usage:<filename> <k> [nfold = 5]')
exit(0)
random.seed( 10 )
k = int( sys.argv[2] )
if len(sys.argv) > 3:
nfold = int( sys.argv[3] )
else:
nfold = 5
fi = open( sys.argv[1], 'r' )
ftr = open( sys.argv[1]+'.train', 'w' )
fte = open( sys.argv[1]+'.test', 'w' )
for l in fi:
if random.randint( 1 , nfold ) == k:
fte.write( l )
else:
ftr.write( l )
fi.close()
ftr.close()
fte.close()
| spark-xgboost-nv-release_1.4.0 | demo/CLI/binary_classification/mknfold.py |
#!/usr/bin/python
def loadfmap( fname ):
fmap = {}
nmap = {}
for l in open( fname ):
arr = l.split()
if arr[0].find('.') != -1:
idx = int( arr[0].strip('.') )
assert idx not in fmap
fmap[ idx ] = {}
ftype = arr[1].strip(':')
content = arr[2]
else:
content = arr[0]
for it in content.split(','):
if it.strip() == '':
continue
k , v = it.split('=')
fmap[ idx ][ v ] = len(nmap)
nmap[ len(nmap) ] = ftype+'='+k
return fmap, nmap
def write_nmap( fo, nmap ):
for i in range( len(nmap) ):
fo.write('%d\t%s\ti\n' % (i, nmap[i]) )
# start here
fmap, nmap = loadfmap( 'agaricus-lepiota.fmap' )
fo = open( 'featmap.txt', 'w' )
write_nmap( fo, nmap )
fo.close()
fo = open( 'agaricus.txt', 'w' )
for l in open( 'agaricus-lepiota.data' ):
arr = l.split(',')
if arr[0] == 'p':
fo.write('1')
else:
assert arr[0] == 'e'
fo.write('0')
for i in range( 1,len(arr) ):
fo.write( ' %d:1' % fmap[i][arr[i].strip()] )
fo.write('\n')
fo.close()
| spark-xgboost-nv-release_1.4.0 | demo/CLI/binary_classification/mapfeat.py |
import sys
fo = open(sys.argv[2], 'w')
for l in open(sys.argv[1]):
arr = l.split(',')
fo.write('%s' % arr[0])
for i in range(len(arr) - 1):
fo.write(' %d:%s' % (i, arr[i+1]))
fo.close()
| spark-xgboost-nv-release_1.4.0 | demo/CLI/yearpredMSD/csv2libsvm.py |
#!/usr/bin/python
import sys
import random
if len(sys.argv) < 2:
print('Usage:<filename> <k> [nfold = 5]')
exit(0)
random.seed(10)
k = int(sys.argv[2])
if len(sys.argv) > 3:
nfold = int(sys.argv[3])
else:
nfold = 5
fi = open(sys.argv[1], 'r')
ftr = open(sys.argv[1] + '.train', 'w')
fte = open(sys.argv[1] + '.test', 'w')
for l in fi:
if random.randint(1, nfold) == k:
fte.write(l)
else:
ftr.write(l)
fi.close()
ftr.close()
fte.close()
| spark-xgboost-nv-release_1.4.0 | demo/CLI/regression/mknfold.py |
#!/usr/bin/python
fo = open('machine.txt', 'w')
cnt = 6
fmap = {}
for l in open('machine.data'):
arr = l.split(',')
fo.write(arr[8])
for i in range(0, 6):
fo.write(' %d:%s' % (i, arr[i + 2]))
if arr[0] not in fmap:
fmap[arr[0]] = cnt
cnt += 1
fo.write(' %d:1' % fmap[arr[0]])
fo.write('\n')
fo.close()
# create feature map for machine data
fo = open('featmap.txt', 'w')
# list from machine.names
names = [
'vendor', 'MYCT', 'MMIN', 'MMAX', 'CACH', 'CHMIN', 'CHMAX', 'PRP', 'ERP'
]
for i in range(0, 6):
fo.write('%d\t%s\tint\n' % (i, names[i + 1]))
for v, k in sorted(fmap.items(), key=lambda x: x[1]):
fo.write('%d\tvendor=%s\ti\n' % (k, v))
fo.close()
| spark-xgboost-nv-release_1.4.0 | demo/CLI/regression/mapfeat.py |
'''Demonstration for parsing the JSON tree model file generated by XGBoost. The
support is experimental; the output schema is subject to change in the future.
'''
import json
import argparse
class Tree:
'''A tree built by XGBoost.'''
# Index into node array
_left = 0
_right = 1
_parent = 2
_ind = 3
_cond = 4
_default_left = 5
# Index into stat array
_loss_chg = 0
_sum_hess = 1
_base_weight = 2
def __init__(self, tree_id: int, nodes, stats):
self.tree_id = tree_id
self.nodes = nodes
self.stats = stats
def loss_change(self, node_id: int):
'''Loss gain of a node.'''
return self.stats[node_id][self._loss_chg]
def sum_hessian(self, node_id: int):
'''Sum Hessian of a node.'''
return self.stats[node_id][self._sum_hess]
def base_weight(self, node_id: int):
'''Base weight of a node.'''
return self.stats[node_id][self._base_weight]
def split_index(self, node_id: int):
'''Split feature index of node.'''
return self.nodes[node_id][self._ind]
def split_condition(self, node_id: int):
'''Split value of a node.'''
return self.nodes[node_id][self._cond]
def parent(self, node_id: int):
'''Parent ID of a node.'''
return self.nodes[node_id][self._parent]
def left_child(self, node_id: int):
'''Left child ID of a node.'''
return self.nodes[node_id][self._left]
def right_child(self, node_id: int):
'''Right child ID of a node.'''
return self.nodes[node_id][self._right]
def is_leaf(self, node_id: int):
'''Whether a node is leaf.'''
return self.nodes[node_id][self._left] == -1
def is_deleted(self, node_id: int):
'''Whether a node is deleted.'''
# std::numeric_limits<uint32_t>::max()
return self.nodes[node_id][self._ind] == 4294967295
def __str__(self):
stacks = [0]
nodes = []
while stacks:
node = {}
nid = stacks.pop()
node['node id'] = nid
node['gain'] = self.loss_change(nid)
node['cover'] = self.sum_hessian(nid)
nodes.append(node)
if not self.is_leaf(nid) and not self.is_deleted(nid):
left = self.left_child(nid)
right = self.right_child(nid)
stacks.append(left)
stacks.append(right)
string = '\n'.join(map(lambda x: ' ' + str(x), nodes))
return string
class Model:
'''Gradient boosted tree model.'''
def __init__(self, model: dict):
'''Construct the Model from JSON object.
parameters
----------
model: A dictionary loaded by json
'''
# Basic property of a model
self.learner_model_shape = model['learner']['learner_model_param']
self.num_output_group = int(self.learner_model_shape['num_class'])
self.num_feature = int(self.learner_model_shape['num_feature'])
self.base_score = float(self.learner_model_shape['base_score'])
# A field encoding which output group a tree belongs
self.tree_info = model['learner']['gradient_booster']['model'][
'tree_info']
model_shape = model['learner']['gradient_booster']['model'][
'gbtree_model_param']
# JSON representation of trees
j_trees = model['learner']['gradient_booster']['model']['trees']
# Load the trees
self.num_trees = int(model_shape['num_trees'])
self.leaf_size = int(model_shape['size_leaf_vector'])
# Right now XGBoost doesn't support vector leaf yet
assert self.leaf_size == 0, str(self.leaf_size)
trees = []
for i in range(self.num_trees):
tree = j_trees[i]
tree_id = int(tree['id'])
assert tree_id == i, (tree_id, i)
# properties
left_children = tree['left_children']
right_children = tree['right_children']
parents = tree['parents']
split_conditions = tree['split_conditions']
split_indices = tree['split_indices']
default_left = tree['default_left']
# stats
base_weights = tree['base_weights']
loss_changes = tree['loss_changes']
sum_hessian = tree['sum_hessian']
stats = []
nodes = []
# We resemble the structure used inside XGBoost, which is similar
# to adjacency list.
for node_id in range(len(left_children)):
nodes.append([
left_children[node_id], right_children[node_id],
parents[node_id], split_indices[node_id],
split_conditions[node_id], default_left[node_id]
])
stats.append([
loss_changes[node_id], sum_hessian[node_id],
base_weights[node_id]
])
tree = Tree(tree_id, nodes, stats)
trees.append(tree)
self.trees = trees
def print_model(self):
for i, tree in enumerate(self.trees):
print('tree_id:', i)
print(tree)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Demonstration for loading and printing XGBoost model.')
parser.add_argument('--model',
type=str,
required=True,
help='Path to JSON model file.')
args = parser.parse_args()
with open(args.model, 'r') as fd:
model = json.load(fd)
model = Model(model)
model.print_model()
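# A minimal sketch for producing a compatible JSON model file (assumes xgboost
# and numpy are installed; the data below is random and purely illustrative):
#   import numpy as np, xgboost as xgb
#   X, y = np.random.randn(100, 10), np.random.randint(0, 2, size=100)
#   bst = xgb.train({"objective": "binary:logistic"}, xgb.DMatrix(X, y), num_boost_round=5)
#   bst.save_model("model.json")  # then: python json_parser.py --model model.json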
| spark-xgboost-nv-release_1.4.0 | demo/json-model/json_parser.py |
import xgboost as xgb
from sklearn.datasets import make_classification
import dask
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
def main(client):
# Inform XGBoost that RMM is used for GPU memory allocation
xgb.set_config(use_rmm=True)
X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
X = dask.array.from_array(X)
y = dask.array.from_array(y)
dtrain = xgb.dask.DaskDMatrix(client, X, label=y)
params = {'max_depth': 8, 'eta': 0.01, 'objective': 'multi:softprob', 'num_class': 3,
'tree_method': 'gpu_hist', 'eval_metric': 'merror'}
output = xgb.dask.train(client, params, dtrain, num_boost_round=100,
evals=[(dtrain, 'train')])
bst = output['booster']
history = output['history']
for i, e in enumerate(history['train']['merror']):
print(f'[{i}] train-merror: {e}')
if __name__ == '__main__':
# To use RMM pool allocator with a GPU Dask cluster, just add rmm_pool_size option to
# LocalCUDACluster constructor.
with LocalCUDACluster(rmm_pool_size='2GB') as cluster:
with Client(cluster) as client:
main(client)
| spark-xgboost-nv-release_1.4.0 | demo/rmm_plugin/rmm_mgpu_with_dask.py |
import xgboost as xgb
import rmm
from sklearn.datasets import make_classification
# Initialize RMM pool allocator
rmm.reinitialize(pool_allocator=True)
# Inform XGBoost that RMM is used for GPU memory allocation
xgb.set_config(use_rmm=True)
X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
dtrain = xgb.DMatrix(X, label=y)
params = {'max_depth': 8, 'eta': 0.01, 'objective': 'multi:softprob', 'num_class': 3,
'tree_method': 'gpu_hist'}
# XGBoost will automatically use the RMM pool allocator
bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, 'train')])
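# Note: as an alternative to the global setting above, recent XGBoost versions
# also offer a scoped form, e.g. `with xgb.config_context(use_rmm=True): ...`;
# availability depends on the installed version, so treat this as a hint rather
# than a guarantee.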
| spark-xgboost-nv-release_1.4.0 | demo/rmm_plugin/rmm_singlegpu.py |
"""
Demo for survival analysis (regression) using Accelerated Failure Time (AFT) model, using Optuna
to tune hyperparameters
"""
from sklearn.model_selection import ShuffleSplit
import pandas as pd
import numpy as np
import xgboost as xgb
import optuna
# The Veterans' Administration Lung Cancer Trial
# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)
df = pd.read_csv('../data/veterans_lung_cancer.csv')
print('Training data:')
print(df)
# Split features and labels
y_lower_bound = df['Survival_label_lower_bound']
y_upper_bound = df['Survival_label_upper_bound']
X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1)
# Split data into training and validation sets
rs = ShuffleSplit(n_splits=2, test_size=.7, random_state=0)
train_index, valid_index = next(rs.split(X))
dtrain = xgb.DMatrix(X.values[train_index, :])
dtrain.set_float_info('label_lower_bound', y_lower_bound[train_index])
dtrain.set_float_info('label_upper_bound', y_upper_bound[train_index])
dvalid = xgb.DMatrix(X.values[valid_index, :])
dvalid.set_float_info('label_lower_bound', y_lower_bound[valid_index])
dvalid.set_float_info('label_upper_bound', y_upper_bound[valid_index])
# Define hyperparameter search space
base_params = {'verbosity': 0,
'objective': 'survival:aft',
'eval_metric': 'aft-nloglik',
'tree_method': 'hist'} # Hyperparameters common to all trials
def objective(trial):
params = {'learning_rate': trial.suggest_loguniform('learning_rate', 0.01, 1.0),
'aft_loss_distribution': trial.suggest_categorical('aft_loss_distribution',
['normal', 'logistic', 'extreme']),
'aft_loss_distribution_scale': trial.suggest_loguniform('aft_loss_distribution_scale', 0.1, 10.0),
'max_depth': trial.suggest_int('max_depth', 3, 8),
'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0),
'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0)} # Search space
params.update(base_params)
pruning_callback = optuna.integration.XGBoostPruningCallback(trial, 'valid-aft-nloglik')
bst = xgb.train(params, dtrain, num_boost_round=10000,
evals=[(dtrain, 'train'), (dvalid, 'valid')],
early_stopping_rounds=50, verbose_eval=False, callbacks=[pruning_callback])
if bst.best_iteration >= 25:
return bst.best_score
else:
return np.inf # Reject models with < 25 trees
# Run hyperparameter search
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=200)
print('Completed hyperparameter tuning with best aft-nloglik = {}.'.format(study.best_trial.value))
params = {}
params.update(base_params)
params.update(study.best_trial.params)
# Re-run training with the best hyperparameter combination
print('Re-running the best trial... params = {}'.format(params))
bst = xgb.train(params, dtrain, num_boost_round=10000,
evals=[(dtrain, 'train'), (dvalid, 'valid')],
early_stopping_rounds=50)
# Run prediction on the validation set
df = pd.DataFrame({'Label (lower bound)': y_lower_bound[valid_index],
'Label (upper bound)': y_upper_bound[valid_index],
'Predicted label': bst.predict(dvalid)})
print(df)
# Show only data points with right-censored labels
print(df[np.isinf(df['Label (upper bound)'])])
# Save trained model
bst.save_model('aft_best_model.json')
| spark-xgboost-nv-release_1.4.0 | demo/aft_survival/aft_survival_demo_with_optuna.py |
"""
Demo for survival analysis (regression) using Accelerated Failure Time (AFT) model
"""
import os
from sklearn.model_selection import ShuffleSplit
import pandas as pd
import numpy as np
import xgboost as xgb
# The Veterans' Administration Lung Cancer Trial
# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)
CURRENT_DIR = os.path.dirname(__file__)
df = pd.read_csv(os.path.join(CURRENT_DIR, '../data/veterans_lung_cancer.csv'))
print('Training data:')
print(df)
# Split features and labels
y_lower_bound = df['Survival_label_lower_bound']
y_upper_bound = df['Survival_label_upper_bound']
X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1)
# Split data into training and validation sets
rs = ShuffleSplit(n_splits=2, test_size=.7, random_state=0)
train_index, valid_index = next(rs.split(X))
dtrain = xgb.DMatrix(X.values[train_index, :])
dtrain.set_float_info('label_lower_bound', y_lower_bound[train_index])
dtrain.set_float_info('label_upper_bound', y_upper_bound[train_index])
dvalid = xgb.DMatrix(X.values[valid_index, :])
dvalid.set_float_info('label_lower_bound', y_lower_bound[valid_index])
dvalid.set_float_info('label_upper_bound', y_upper_bound[valid_index])
# Train gradient boosted trees using AFT loss and metric
params = {'verbosity': 0,
'objective': 'survival:aft',
'eval_metric': 'aft-nloglik',
'tree_method': 'hist',
'learning_rate': 0.05,
'aft_loss_distribution': 'normal',
'aft_loss_distribution_scale': 1.20,
'max_depth': 6,
'lambda': 0.01,
'alpha': 0.02}
bst = xgb.train(params, dtrain, num_boost_round=10000,
evals=[(dtrain, 'train'), (dvalid, 'valid')],
early_stopping_rounds=50)
# Run prediction on the validation set
df = pd.DataFrame({'Label (lower bound)': y_lower_bound[valid_index],
'Label (upper bound)': y_upper_bound[valid_index],
'Predicted label': bst.predict(dvalid)})
print(df)
# Show only data points with right-censored labels
print(df[np.isinf(df['Label (upper bound)'])])
# Save trained model
bst.save_model('aft_model.json')
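# Note: with the survival:aft objective, bst.predict returns estimated survival
# times on the same scale as the label bounds, which is why the data frame above
# can compare predictions directly against the lower/upper bounds.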
| spark-xgboost-nv-release_1.4.0 | demo/aft_survival/aft_survival_demo.py |
"""
Visual demo for survival analysis (regression) with Accelerated Failure Time (AFT) model.
This demo uses 1D toy data and visualizes how XGBoost fits a tree ensemble. The ensemble model
starts out as a flat line and evolves into a step function in order to account for all ranged
labels.
"""
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 13})
# Function to visualize censored labels
def plot_censored_labels(X, y_lower, y_upper):
def replace_inf(x, target_value):
x[np.isinf(x)] = target_value
return x
plt.plot(X, y_lower, 'o', label='y_lower', color='blue')
plt.plot(X, y_upper, 'o', label='y_upper', color='fuchsia')
plt.vlines(X, ymin=replace_inf(y_lower, 0.01), ymax=replace_inf(y_upper, 1000),
label='Range for y', color='gray')
# Toy data
X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1))
INF = np.inf
y_lower = np.array([ 10, 15, -INF, 30, 100])
y_upper = np.array([INF, INF, 20, 50, INF])
# Visualize toy data
plt.figure(figsize=(5, 4))
plot_censored_labels(X, y_lower, y_upper)
plt.ylim((6, 200))
plt.legend(loc='lower right')
plt.title('Toy data')
plt.xlabel('Input feature')
plt.ylabel('Label')
plt.yscale('log')
plt.tight_layout()
plt.show(block=True)
# Will be used to visualize XGBoost model
grid_pts = np.linspace(0.8, 5.2, 1000).reshape((-1, 1))
# Train AFT model using XGBoost
dmat = xgb.DMatrix(X)
dmat.set_float_info('label_lower_bound', y_lower)
dmat.set_float_info('label_upper_bound', y_upper)
params = {'max_depth': 3, 'objective':'survival:aft', 'min_child_weight': 0}
accuracy_history = []
def plot_intermediate_model_callback(env):
"""Custom callback to plot intermediate models"""
# Compute y_pred = prediction using the intermediate model, at current boosting iteration
y_pred = env.model.predict(dmat)
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
# the corresponding predicted label (y_pred)
acc = np.sum(np.logical_and(y_pred >= y_lower, y_pred <= y_upper)/len(X) * 100)
accuracy_history.append(acc)
# Plot ranged labels as well as predictions by the model
plt.subplot(5, 3, env.iteration + 1)
plot_censored_labels(X, y_lower, y_upper)
y_pred_grid_pts = env.model.predict(xgb.DMatrix(grid_pts))
plt.plot(grid_pts, y_pred_grid_pts, 'r-', label='XGBoost AFT model', linewidth=4)
plt.title('Iteration {}'.format(env.iteration), x=0.5, y=0.8)
plt.xlim((0.8, 5.2))
plt.ylim((1 if np.min(y_pred) < 6 else 6, 200))
plt.yscale('log')
res = {}
plt.figure(figsize=(12,13))
bst = xgb.train(params, dmat, 15, [(dmat, 'train')], evals_result=res,
callbacks=[plot_intermediate_model_callback])
plt.tight_layout()
plt.legend(loc='lower center', ncol=4,
bbox_to_anchor=(0.5, 0),
bbox_transform=plt.gcf().transFigure)
plt.tight_layout()
# Plot negative log likelihood over boosting iterations
plt.figure(figsize=(8,3))
plt.subplot(1, 2, 1)
plt.plot(res['train']['aft-nloglik'], 'b-o', label='aft-nloglik')
plt.xlabel('# Boosting Iterations')
plt.legend(loc='best')
# Plot "accuracy" over boosting iterations
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
# the corresponding predicted label (y_pred)
plt.subplot(1, 2, 2)
plt.plot(accuracy_history, 'r-o', label='Accuracy (%)')
plt.xlabel('# Boosting Iterations')
plt.legend(loc='best')
plt.tight_layout()
plt.show()
| spark-xgboost-nv-release_1.4.0 | demo/aft_survival/aft_survival_viz_demo.py |
#!/usr/bin/python
import numpy as np
import xgboost as xgb
### load data in do training
train = np.loadtxt('./data/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s'.encode('utf-8')) } )
label = train[:,32]
data = train[:,1:31]
weight = train[:,31]
dtrain = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight )
param = {'max_depth':6, 'eta':0.1, 'objective':'binary:logitraw', 'nthread':4}
num_round = 120
print ('running cross validation, with preprocessing function')
# define the preprocessing function
# used to return the preprocessed training, test data, and parameter
# we can use this to do weight rescale, etc.
# as an example, we try to set scale_pos_weight
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label==1)
param['scale_pos_weight'] = ratio
wtrain = dtrain.get_weight()
wtest = dtest.get_weight()
sum_weight = sum(wtrain) + sum(wtest)
wtrain *= sum_weight / sum(wtrain)
wtest *= sum_weight / sum(wtest)
dtrain.set_weight(wtrain)
dtest.set_weight(wtest)
return (dtrain, dtest, param)
# do cross validation, for each fold
# the dtrain, dtest, param will be passed into fpreproc
# then the return value of fpreproc will be used to generate
# results of that fold
xgb.cv(param, dtrain, num_round, nfold=5,
metrics={'ams@0.15', 'auc'}, seed = 0, fpreproc = fpreproc)
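# Note: 'ams@0.15' is XGBoost's Approximate Median Significance metric evaluated
# on the top 15% of ranked predictions, matching the 0.15 threshold ratio used
# elsewhere in this Higgs demo.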
| spark-xgboost-nv-release_1.4.0 | demo/kaggle-higgs/higgs-cv.py |
#!/usr/bin/python
# this is the example script to use xgboost to train
import numpy as np
import xgboost as xgb
test_size = 550000
# path to where the data lies
dpath = 'data'
# load in training data, directly use numpy
dtrain = np.loadtxt( dpath+'/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s'.encode('utf-8')) } )
print ('finish loading from csv ')
label = dtrain[:,32]
data = dtrain[:,1:31]
# rescale weight to make it same as test set
weight = dtrain[:,31] * float(test_size) / len(label)
sum_wpos = sum( weight[i] for i in range(len(label)) if label[i] == 1.0 )
sum_wneg = sum( weight[i] for i in range(len(label)) if label[i] == 0.0 )
# print weight statistics
print ('weight statistics: wpos=%g, wneg=%g, ratio=%g' % ( sum_wpos, sum_wneg, sum_wneg/sum_wpos ))
# construct xgboost.DMatrix from numpy array, treat -999.0 as missing value
xgmat = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight )
# setup parameters for xgboost
param = {}
# use logistic regression loss, use raw prediction before logistic transformation
# since we only need the rank
param['objective'] = 'binary:logitraw'
# scale weight of positive examples
param['scale_pos_weight'] = sum_wneg/sum_wpos
param['eta'] = 0.1
param['max_depth'] = 6
param['eval_metric'] = 'auc'
param['nthread'] = 16
# you can directly throw param in, though we want to watch multiple metrics here
plst = list(param.items()) + [('eval_metric', 'ams@0.15')]
watchlist = [ (xgmat,'train') ]
# boost 120 trees
num_round = 120
print ('loading data end, start to boost trees')
bst = xgb.train( plst, xgmat, num_round, watchlist );
# save out model
bst.save_model('higgs.model')
print ('finish training')
| spark-xgboost-nv-release_1.4.0 | demo/kaggle-higgs/higgs-numpy.py |
#!/usr/bin/python
# this is the example script to use xgboost to train
import numpy as np
import xgboost as xgb
from sklearn.ensemble import GradientBoostingClassifier
import time
test_size = 550000
# path to where the data lies
dpath = 'data'
# load in training data, directly use numpy
dtrain = np.loadtxt( dpath+'/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s'.encode('utf-8')) } )
print ('finish loading from csv ')
label = dtrain[:,32]
data = dtrain[:,1:31]
# rescale weight to make it same as test set
weight = dtrain[:,31] * float(test_size) / len(label)
sum_wpos = sum( weight[i] for i in range(len(label)) if label[i] == 1.0 )
sum_wneg = sum( weight[i] for i in range(len(label)) if label[i] == 0.0 )
# print weight statistics
print ('weight statistics: wpos=%g, wneg=%g, ratio=%g' % ( sum_wpos, sum_wneg, sum_wneg/sum_wpos ))
# construct xgboost.DMatrix from numpy array, treat -999.0 as missing value
xgmat = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight )
# setup parameters for xgboost
param = {}
# use logistic regression loss
param['objective'] = 'binary:logitraw'
# scale weight of positive examples
param['scale_pos_weight'] = sum_wneg/sum_wpos
param['eta'] = 0.1
param['max_depth'] = 6
param['eval_metric'] = 'auc'
param['nthread'] = 4
plst = list(param.items()) + [('eval_metric', 'ams@0.15')]
watchlist = [ (xgmat,'train') ]
# boost 10 trees
num_round = 10
print ('loading data end, start to boost trees')
print ("training GBM from sklearn")
tmp = time.time()
gbm = GradientBoostingClassifier(n_estimators=num_round, max_depth=6, verbose=2)
gbm.fit(data, label)
print ("sklearn.GBM costs: %s seconds" % str(time.time() - tmp))
#raw_input()
print ("training xgboost")
threads = [1, 2, 4, 16]
for i in threads:
param['nthread'] = i
tmp = time.time()
plst = list(param.items()) + [('eval_metric', 'ams@0.15')]
bst = xgb.train(plst, xgmat, num_round, watchlist)
print ("XGBoost with %d thread costs: %s seconds" % (i, str(time.time() - tmp)))
print ('finish training')
| spark-xgboost-nv-release_1.4.0 | demo/kaggle-higgs/speedtest.py |
#!/usr/bin/python
# make prediction
import numpy as np
import xgboost as xgb
# path to where the data lies
dpath = 'data'
modelfile = 'higgs.model'
outfile = 'higgs.pred.csv'
# make top 15% as positive
threshold_ratio = 0.15
# load in training data, directly use numpy
dtest = np.loadtxt( dpath+'/test.csv', delimiter=',', skiprows=1 )
data = dtest[:,1:31]
idx = dtest[:,0]
print ('finish loading from csv ')
xgmat = xgb.DMatrix( data, missing = -999.0 )
bst = xgb.Booster({'nthread':16}, model_file = modelfile)
ypred = bst.predict( xgmat )
res = [ ( int(idx[i]), ypred[i] ) for i in range(len(ypred)) ]
rorder = {}
for k, v in sorted( res, key = lambda x:-x[1] ):
rorder[ k ] = len(rorder) + 1
# write out predictions
ntop = int( threshold_ratio * len(rorder ) )
fo = open(outfile, 'w')
nhit = 0
ntot = 0
fo.write('EventId,RankOrder,Class\n')
for k, v in res:
if rorder[k] <= ntop:
lb = 's'
nhit += 1
else:
lb = 'b'
# change output rank order to follow Kaggle convention
fo.write('%s,%d,%s\n' % ( k, len(rorder)+1-rorder[k], lb ) )
ntot += 1
fo.close()
print ('finished writing into prediction file')
| spark-xgboost-nv-release_1.4.0 | demo/kaggle-higgs/higgs-pred.py |
import os
import xgboost as xgb
### simple example for using external memory version
# this is the only difference, add a # followed by a cache prefix name
# several cache files with this prefix will be generated
# currently only conversion from libsvm files is supported
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.train#dtrain.cache'))
dtest = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.test#dtest.cache'))
# specify validations set to watch performance
param = {'max_depth':2, 'eta':1, 'objective':'binary:logistic'}
# performance notice: set nthread to be the number of your real cpu
# some CPUs offer two threads per core; for example, for a 4-core CPU with 8 threads, set nthread=4
#param['nthread']=num_real_cpu
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 2
bst = xgb.train(param, dtrain, num_round, watchlist)
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/external_memory.py |
'''Demo for creating a customized multi-class objective function. This demo is
only applicable to XGBoost versions after (and excluding) 1.0.0, as before this
version XGBoost returned the transformed prediction for the multi-class
objective function. More details are in the comments below.
'''
import numpy as np
import xgboost as xgb
from matplotlib import pyplot as plt
import argparse
np.random.seed(1994)
kRows = 100
kCols = 10
kClasses = 4 # number of classes
kRounds = 10 # number of boosting rounds.
# Generate some random data for demo.
X = np.random.randn(kRows, kCols)
y = np.random.randint(0, 4, size=kRows)
m = xgb.DMatrix(X, y)
def softmax(x):
'''Softmax function with x as input vector.'''
e = np.exp(x)
return e / np.sum(e)
def softprob_obj(predt: np.ndarray, data: xgb.DMatrix):
'''Loss function. Computing the gradient and approximated hessian (diagonal).
Reimplements the `multi:softprob` inside XGBoost.
'''
labels = data.get_label()
if data.get_weight().size == 0:
# Use 1 as weight if we don't have custom weight.
weights = np.ones((kRows, 1), dtype=float)
else:
weights = data.get_weight()
# The prediction is of shape (rows, classes), each element in a row
# represents a raw prediction (leaf weight, hasn't gone through softmax
# yet). In XGBoost 1.0.0, the prediction is transformed by a softmax
# function, fixed in later versions.
assert predt.shape == (kRows, kClasses)
grad = np.zeros((kRows, kClasses), dtype=float)
hess = np.zeros((kRows, kClasses), dtype=float)
eps = 1e-6
# compute the gradient and hessian, slow iterations in Python, only
# suitable for demo. Also the one in native XGBoost core is more robust to
# numeric overflow as we don't do anything to mitigate the `exp` in
# `softmax` here.
for r in range(predt.shape[0]):
target = labels[r]
p = softmax(predt[r, :])
for c in range(predt.shape[1]):
assert 0 <= target < kClasses
g = p[c] - 1.0 if c == target else p[c]
g = g * weights[r]
h = max((2.0 * p[c] * (1.0 - p[c]) * weights[r]).item(), eps)
grad[r, c] = g
hess[r, c] = h
# Right now (XGBoost 1.0.0), reshaping is necessary
grad = grad.reshape((kRows * kClasses, 1))
hess = hess.reshape((kRows * kClasses, 1))
return grad, hess
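# In equation form, for each row with true class y and softmax probabilities p:
#   grad_c = (p_c - 1[c == y]) * weight
#   hess_c ~= 2 * p_c * (1 - p_c) * weight   (clipped below by eps)
# which is the scaled diagonal approximation this function uses to mirror
# XGBoost's built-in multi:softprob objective.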
def predict(booster: xgb.Booster, X):
'''A customized prediction function that converts raw prediction to
target class.
'''
# Output margin means we want to obtain the raw prediction obtained from
# tree leaf weight.
predt = booster.predict(X, output_margin=True)
out = np.zeros(kRows)
for r in range(predt.shape[0]):
# the class with maximum prob (not strictly a prob as it hasn't gone
# through softmax yet, so it doesn't sum to 1, but the result is the same
# for argmax).
i = np.argmax(predt[r])
out[r] = i
return out
def merror(predt: np.ndarray, dtrain: xgb.DMatrix):
y = dtrain.get_label()
# Like custom objective, the predt is untransformed leaf weight
assert predt.shape == (kRows, kClasses)
out = np.zeros(kRows)
for r in range(predt.shape[0]):
i = np.argmax(predt[r])
out[r] = i
assert y.shape == out.shape
errors = np.zeros(kRows)
errors[y != out] = 1.0
return 'PyMError', np.sum(errors) / kRows
def plot_history(custom_results, native_results):
fig, axs = plt.subplots(2, 1)
ax0 = axs[0]
ax1 = axs[1]
pymerror = custom_results['train']['PyMError']
merror = native_results['train']['merror']
x = np.arange(0, kRounds, 1)
ax0.plot(x, pymerror, label='Custom objective')
ax0.legend()
ax1.plot(x, merror, label='multi:softmax')
ax1.legend()
plt.show()
def main(args):
custom_results = {}
# Use our custom objective function
booster_custom = xgb.train({'num_class': kClasses,
'disable_default_eval_metric': True},
m,
num_boost_round=kRounds,
obj=softprob_obj,
feval=merror,
evals_result=custom_results,
evals=[(m, 'train')])
predt_custom = predict(booster_custom, m)
native_results = {}
# Use the same objective function defined in XGBoost.
booster_native = xgb.train({'num_class': kClasses,
'eval_metric': 'merror'},
m,
num_boost_round=kRounds,
evals_result=native_results,
evals=[(m, 'train')])
predt_native = booster_native.predict(m)
# We are reimplementing the loss function in XGBoost, so it should
# be the same for normal cases.
assert np.all(predt_custom == predt_native)
np.testing.assert_allclose(custom_results['train']['PyMError'],
native_results['train']['merror'])
if args.plot != 0:
plot_history(custom_results, native_results)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Arguments for custom softmax objective function demo.')
parser.add_argument(
'--plot',
type=int,
default=1,
help='Set to 0 to disable plotting the evaluation history.')
args = parser.parse_args()
main(args)
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/custom_softmax.py |
'''A demo for defining data iterator.
.. versionadded:: 1.2.0
The demo defines a customized iterator for passing batches of data into
`xgboost.DeviceQuantileDMatrix` and uses this `DeviceQuantileDMatrix` for
training. The feature is primarily designed to reduce the required GPU
memory for training in a distributed environment.
After going through the demo, one might ask why we don't use a more native
Python iterator. That's because XGBoost requires a `reset` function, while
using `itertools.tee` might incur significant memory usage according to:
https://docs.python.org/3/library/itertools.html#itertools.tee.
'''
import xgboost
import cupy
import numpy
COLS = 64
ROWS_PER_BATCH = 1000 # data is split by rows
BATCHES = 32
class IterForDMatrixDemo(xgboost.core.DataIter):
'''A data iterator for XGBoost DMatrix.
`reset` and `next` are required for any data iterator; other functions here
are utilities for demonstration purposes.
'''
def __init__(self):
'''Generate some random data for demonstration.
Actual data can be anything that is currently supported by XGBoost.
'''
self.rows = ROWS_PER_BATCH
self.cols = COLS
rng = cupy.random.RandomState(1994)
self._data = [rng.randn(self.rows, self.cols)] * BATCHES
self._labels = [rng.randn(self.rows)] * BATCHES
self._weights = [rng.uniform(size=self.rows)] * BATCHES
self.it = 0 # set iterator to 0
super().__init__()
def as_array(self):
return cupy.concatenate(self._data)
def as_array_labels(self):
return cupy.concatenate(self._labels)
def as_array_weights(self):
return cupy.concatenate(self._weights)
def data(self):
'''Utility function for obtaining current batch of data.'''
return self._data[self.it]
def labels(self):
'''Utility function for obtaining current batch of label.'''
return self._labels[self.it]
def weights(self):
return self._weights[self.it]
def reset(self):
'''Reset the iterator'''
self.it = 0
def next(self, input_data):
'''Yield next batch of data.'''
if self.it == len(self._data):
# Return 0 when there's no more batch.
return 0
input_data(data=self.data(), label=self.labels(),
weight=self.weights())
self.it += 1
return 1
def main():
rounds = 100
it = IterForDMatrixDemo()
# Use iterator, must be `DeviceQuantileDMatrix`
m_with_it = xgboost.DeviceQuantileDMatrix(it)
# Use regular DMatrix.
m = xgboost.DMatrix(it.as_array(), it.as_array_labels(),
weight=it.as_array_weights())
assert m_with_it.num_col() == m.num_col()
assert m_with_it.num_row() == m.num_row()
reg_with_it = xgboost.train({'tree_method': 'gpu_hist'}, m_with_it,
num_boost_round=rounds)
predict_with_it = reg_with_it.predict(m_with_it)
reg = xgboost.train({'tree_method': 'gpu_hist'}, m,
num_boost_round=rounds)
predict = reg.predict(m)
numpy.testing.assert_allclose(predict_with_it, predict,
rtol=1e6)
if __name__ == '__main__':
main()
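# A minimal sketch (not part of the original demo) of a file-backed iterator.
# The file names below are hypothetical placeholders; the only hard
# requirements are the same as in IterForDMatrixDemo: call ``input_data`` once
# per batch inside ``next`` and rewind the cursor inside ``reset``.
#
# class IterFromFiles(xgboost.core.DataIter):
#     def __init__(self, file_paths):
#         self._paths = file_paths  # e.g. ['part-0.npy', 'part-1.npy']
#         self._it = 0
#         super().__init__()
#
#     def next(self, input_data):
#         if self._it == len(self._paths):
#             return 0  # no more batches
#         batch = cupy.load(self._paths[self._it])
#         input_data(data=batch[:, :-1], label=batch[:, -1])
#         self._it += 1
#         return 1
#
#     def reset(self):
#         self._it = 0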
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/data_iterator.py |
import os
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.train'))
dtest = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.test'))
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
###
# advanced: start from an initial base prediction
#
print('start running example to start from an initial prediction')
# specify parameters via map; definitions are the same as the C++ version
param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
# train xgboost for 1 round
bst = xgb.train(param, dtrain, 1, watchlist)
# Note: we need the margin value instead of the transformed prediction in
# set_base_margin.
# Predicting with output_margin=True always gives you margin values
# before the logistic transformation.
ptrain = bst.predict(dtrain, output_margin=True)
ptest = bst.predict(dtest, output_margin=True)
dtrain.set_base_margin(ptrain)
dtest.set_base_margin(ptest)
print('this is result of running from initial prediction')
bst = xgb.train(param, dtrain, 1, watchlist)
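# Added note (not in the original script): after set_base_margin the second
# xgb.train call computes its gradients from the stored margins rather than
# from the default base_score, so its evaluation log should roughly continue
# from where the first one-round model stopped.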
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/boost_from_prediction.py |
'''Using feature weights to change column sampling.
.. versionadded:: 1.3.0
'''
import numpy as np
import xgboost
from matplotlib import pyplot as plt
import argparse
def main(args):
rng = np.random.RandomState(1994)
kRows = 1000
kCols = 10
X = rng.randn(kRows, kCols)
y = rng.randn(kRows)
fw = np.ones(shape=(kCols,))
for i in range(kCols):
fw[i] *= float(i)
dtrain = xgboost.DMatrix(X, y)
dtrain.set_info(feature_weights=fw)
bst = xgboost.train({'tree_method': 'hist',
'colsample_bynode': 0.5},
dtrain, num_boost_round=10,
evals=[(dtrain, 'd')])
feature_map = bst.get_fscore()
# feature zero has 0 weight
assert feature_map.get('f0', None) is None
assert max(feature_map.values()) == feature_map.get('f9')
if args.plot:
xgboost.plot_importance(bst)
plt.show()
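# Added note (not part of the original demo): the feature weights act as
# unnormalized sampling probabilities for column sampling, which is why 'f0'
# (weight 0) never appears in the fitted trees while 'f9' (the largest weight)
# is selected most often, as the assertions above check.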
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--plot',
type=int,
default=1,
help='Set to 0 to disable plotting the evaluation history.')
args = parser.parse_args()
main(args)
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/feature_weights.py |
'''Demo for defining a customized metric and objective. Notice that for
simplicity, weights are not used in the following example. In this
script, we implement the Squared Log Error (SLE) objective and RMSLE metric
as customized functions, then compare them with the native implementation in
XGBoost. See doc/tutorials/custom_metric_obj.rst for a step by step
walkthrough, with other details.
The `SLE` objective reduces the impact of outliers in the training dataset,
hence here we also compare its performance with the standard squared
error.
'''
import numpy as np
import xgboost as xgb
from typing import Tuple, Dict, List
from time import time
import argparse
import matplotlib
from matplotlib import pyplot as plt
# shape of generated data.
kRows = 4096
kCols = 16
kOutlier = 10000 # mean of generated outliers
kNumberOfOutliers = 64
kRatio = 0.7
kSeed = 1994
kBoostRound = 20
np.random.seed(seed=kSeed)
def generate_data() -> Tuple[xgb.DMatrix, xgb.DMatrix]:
'''Generate data containing outliers.'''
x = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
y += np.abs(np.min(y))
# Create outliers
for i in range(0, kNumberOfOutliers):
ind = np.random.randint(0, len(y)-1)
y[ind] += np.random.randint(0, kOutlier)
train_portion = int(kRows * kRatio)
    # rmsle requires all labels to be greater than -1.
assert np.all(y > -1.0)
train_x: np.ndarray = x[: train_portion]
train_y: np.ndarray = y[: train_portion]
dtrain = xgb.DMatrix(train_x, label=train_y)
test_x = x[train_portion:]
test_y = y[train_portion:]
dtest = xgb.DMatrix(test_x, label=test_y)
return dtrain, dtest
def native_rmse(dtrain: xgb.DMatrix,
dtest: xgb.DMatrix) -> Dict[str, Dict[str, List[float]]]:
    '''Train using the native implementation of squared error with RMSE metric.'''
print('Squared Error')
squared_error = {
'objective': 'reg:squarederror',
'eval_metric': 'rmse',
'tree_method': 'hist',
'seed': kSeed
}
start = time()
results: Dict[str, Dict[str, List[float]]] = {}
xgb.train(squared_error,
dtrain=dtrain,
num_boost_round=kBoostRound,
evals=[(dtrain, 'dtrain'), (dtest, 'dtest')],
evals_result=results)
print('Finished Squared Error in:', time() - start, '\n')
return results
def native_rmsle(dtrain: xgb.DMatrix,
dtest: xgb.DMatrix) -> Dict[str, Dict[str, List[float]]]:
'''Train using native implementation of Squared Log Error.'''
print('Squared Log Error')
results: Dict[str, Dict[str, List[float]]] = {}
squared_log_error = {
'objective': 'reg:squaredlogerror',
'eval_metric': 'rmsle',
'tree_method': 'hist',
'seed': kSeed
}
start = time()
xgb.train(squared_log_error,
dtrain=dtrain,
num_boost_round=kBoostRound,
evals=[(dtrain, 'dtrain'), (dtest, 'dtest')],
evals_result=results)
print('Finished Squared Log Error in:', time() - start)
return results
def py_rmsle(dtrain: xgb.DMatrix, dtest: xgb.DMatrix) -> Dict:
'''Train using Python implementation of Squared Log Error.'''
def gradient(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
        '''Compute the gradient of squared log error.'''
y = dtrain.get_label()
return (np.log1p(predt) - np.log1p(y)) / (predt + 1)
def hessian(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
'''Compute the hessian for squared log error.'''
y = dtrain.get_label()
return ((-np.log1p(predt) + np.log1p(y) + 1) /
np.power(predt + 1, 2))
def squared_log(predt: np.ndarray,
dtrain: xgb.DMatrix) -> Tuple[np.ndarray, np.ndarray]:
        r'''Squared Log Error objective. A simplified version of RMSLE used as
        the objective function:
        :math:`\frac{1}{2}[\log(pred + 1) - \log(label + 1)]^2`
        '''
predt[predt < -1] = -1 + 1e-6
grad = gradient(predt, dtrain)
hess = hessian(predt, dtrain)
return grad, hess
def rmsle(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]:
        r''' Root mean squared log error metric.
        :math:`\sqrt{\frac{1}{N}\sum_{i=1}^{N}[\log(pred_i + 1) - \log(label_i + 1)]^2}`
        '''
y = dtrain.get_label()
predt[predt < -1] = -1 + 1e-6
elements = np.power(np.log1p(y) - np.log1p(predt), 2)
return 'PyRMSLE', float(np.sqrt(np.sum(elements) / len(y)))
results: Dict[str, Dict[str, List[float]]] = {}
xgb.train({'tree_method': 'hist', 'seed': kSeed,
'disable_default_eval_metric': 1},
dtrain=dtrain,
num_boost_round=kBoostRound,
obj=squared_log,
feval=rmsle,
evals=[(dtrain, 'dtrain'), (dtest, 'dtest')],
evals_result=results)
return results
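# Added sanity check (not part of the original demo): verify the analytic SLE
# gradient used above against a central finite difference of the loss.  The
# function is only defined here for illustration and is not called by main().
def finite_difference_check(eps: float = 1e-4) -> None:
    '''Compare g(p) = (log(1 + p) - log(1 + y)) / (1 + p) with a numerical
    derivative of the loss 0.5 * (log(1 + p) - log(1 + y)) ** 2.'''
    rng = np.random.RandomState(kSeed)
    y = np.abs(rng.randn(128))
    predt = np.abs(rng.randn(128))
    def loss(p: np.ndarray) -> np.ndarray:
        return 0.5 * np.power(np.log1p(p) - np.log1p(y), 2)
    numeric = (loss(predt + eps) - loss(predt - eps)) / (2 * eps)
    analytic = (np.log1p(predt) - np.log1p(y)) / (predt + 1)
    np.testing.assert_allclose(numeric, analytic, rtol=1e-3, atol=1e-6)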
def plot_history(rmse_evals, rmsle_evals, py_rmsle_evals):
fig, axs = plt.subplots(3, 1)
ax0: matplotlib.axes.Axes = axs[0]
ax1: matplotlib.axes.Axes = axs[1]
ax2: matplotlib.axes.Axes = axs[2]
x = np.arange(0, kBoostRound, 1)
ax0.plot(x, rmse_evals['dtrain']['rmse'], label='train-RMSE')
ax0.plot(x, rmse_evals['dtest']['rmse'], label='test-RMSE')
ax0.legend()
ax1.plot(x, rmsle_evals['dtrain']['rmsle'], label='train-native-RMSLE')
ax1.plot(x, rmsle_evals['dtest']['rmsle'], label='test-native-RMSLE')
ax1.legend()
ax2.plot(x, py_rmsle_evals['dtrain']['PyRMSLE'], label='train-PyRMSLE')
ax2.plot(x, py_rmsle_evals['dtest']['PyRMSLE'], label='test-PyRMSLE')
ax2.legend()
plt.show()
plt.close()
def main(args):
dtrain, dtest = generate_data()
rmse_evals = native_rmse(dtrain, dtest)
rmsle_evals = native_rmsle(dtrain, dtest)
py_rmsle_evals = py_rmsle(dtrain, dtest)
if args.plot != 0:
plot_history(rmse_evals, rmsle_evals, py_rmsle_evals)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Arguments for custom RMSLE objective function demo.')
parser.add_argument(
'--plot',
type=int,
default=1,
help='Set to 0 to disable plotting the evaluation history.')
args = parser.parse_args()
main(args)
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/custom_rmsle.py |
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_boston
import xgboost as xgb
import multiprocessing
if __name__ == "__main__":
print("Parallel Parameter optimization")
boston = load_boston()
y = boston['target']
X = boston['data']
xgb_model = xgb.XGBRegressor(n_jobs=multiprocessing.cpu_count() // 2)
clf = GridSearchCV(xgb_model, {'max_depth': [2, 4, 6],
'n_estimators': [50, 100, 200]}, verbose=1,
n_jobs=2)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/sklearn_parallel.py |
'''
Created on 1 Apr 2015
@author: Jamie Hall
'''
import pickle
import xgboost as xgb
import numpy as np
from sklearn.model_selection import KFold, train_test_split, GridSearchCV
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn.datasets import load_iris, load_digits, load_boston
rng = np.random.RandomState(31337)
print("Zeros and Ones from the Digits dataset: binary classification")
digits = load_digits(n_class=2)
y = digits['target']
X = digits['data']
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("Iris: multiclass classification")
iris = load_iris()
y = iris['target']
X = iris['data']
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("Boston Housing: regression")
boston = load_boston()
y = boston['target']
X = boston['data']
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBRegressor(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(mean_squared_error(actuals, predictions))
print("Parameter optimization")
y = boston['target']
X = boston['data']
xgb_model = xgb.XGBRegressor(n_jobs=1)
clf = GridSearchCV(xgb_model,
{'max_depth': [2, 4, 6],
'n_estimators': [50, 100, 200]}, verbose=1, n_jobs=1)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
# The sklearn API models are picklable
print("Pickling sklearn API models")
# must open in binary format to pickle
pickle.dump(clf, open("best_boston.pkl", "wb"))
clf2 = pickle.load(open("best_boston.pkl", "rb"))
print(np.allclose(clf.predict(X), clf2.predict(X)))
# Early-stopping
X = digits['data']
y = digits['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = xgb.XGBClassifier(n_jobs=1)
clf.fit(X_train, y_train, early_stopping_rounds=10, eval_metric="auc",
eval_set=[(X_test, y_test)])
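# Added note (not in the original script): after fitting with
# early_stopping_rounds and an eval_set, the sklearn wrapper records the best
# round.  getattr is used here as a hedge in case the attribute name differs
# between versions.
print("Best iteration:", getattr(clf, "best_iteration", None))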
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/sklearn_examples.py |
###
# advanced: customized loss function
#
import os
import numpy as np
import xgboost as xgb
print('start running example to use customized objective function')
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.train'))
dtest = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.test'))
# note: what we are getting is the margin value in prediction; you must know
# what you are doing
param = {'max_depth': 2, 'eta': 1, 'objective': 'reg:logistic'}
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 10
# user defined objective function: given the prediction, return the gradient
# and second order gradient. This is the log likelihood loss.
def logregobj(preds, dtrain):
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds)) # transform raw leaf weight
grad = preds - labels
hess = preds * (1.0 - preds)
return grad, hess
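# Added explanation (not in the original script): for the binary logistic loss
# l(m) = -[y * log(sigmoid(m)) + (1 - y) * log(1 - sigmoid(m))] on the raw
# margin m, the first derivative is sigmoid(m) - y and the second derivative is
# sigmoid(m) * (1 - sigmoid(m)), which is exactly what logregobj returns.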
# user defined evaluation function, return a pair metric_name, result
# NOTE: when you do customized loss function, the default prediction value is
# margin, which means the prediction is score before logistic transformation.
def evalerror(preds, dtrain):
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds)) # transform raw leaf weight
# return a pair metric_name, result. The metric name must not contain a
# colon (:) or a space
return 'my-error', float(sum(labels != (preds > 0.5))) / len(labels)
py_evals_result = {}
# training with customized objective, we can also do step by step training
# simply look at training.py's implementation of train
py_params = param.copy()
py_params.update({'disable_default_eval_metric': True})
py_logreg = xgb.train(py_params, dtrain, num_round, watchlist, obj=logregobj,
feval=evalerror, evals_result=py_evals_result)
evals_result = {}
params = param.copy()
params.update({'eval_metric': 'error'})
logreg = xgb.train(params, dtrain, num_boost_round=num_round, evals=watchlist,
evals_result=evals_result)
np.testing.assert_almost_equal(py_evals_result['train']['my-error'],
                               evals_result['train']['error'])
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/custom_objective.py |
import xgboost as xgb
import numpy as np
# this script demonstrates how to fit a gamma regression model (with log link
# function) in xgboost. Before running the demo you need to generate the
# autoclaims dataset by running gen_autoclaims.R located in xgboost/demo/data.
data = np.genfromtxt('../data/autoclaims.csv', delimiter=',')
dtrain = xgb.DMatrix(data[0:4741, 0:34], data[0:4741, 34])
dtest = xgb.DMatrix(data[4741:6773, 0:34], data[4741:6773, 34])
# for gamma regression, we need to set the objective to 'reg:gamma'; it is also
# suggested to set base_score to a value between 1 and 5 if the number of iterations is small
param = {'objective':'reg:gamma', 'booster':'gbtree', 'base_score':3}
# the rest of settings are the same
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 30
# training and evaluation
bst = xgb.train(param, dtrain, num_round, watchlist)
preds = bst.predict(dtest)
labels = dtest.get_label()
print('test deviance=%f' % (2 * np.sum((labels - preds) / preds - np.log(labels) + np.log(preds))))
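# Added note (not in the original script): the expression above is the
# (unscaled) gamma deviance, 2 * sum((y - mu) / mu - log(y / mu)) with
# mu = preds, expanded as (y - mu) / mu - log(y) + log(mu).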
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/gamma_regression.py |
import os
import numpy as np
import xgboost as xgb
from sklearn.datasets import load_svmlight_file
CURRENT_DIR = os.path.dirname(__file__)
train = os.path.join(CURRENT_DIR, "../data/agaricus.txt.train")
test = os.path.join(CURRENT_DIR, "../data/agaricus.txt.test")
def native_interface():
    # load data and do training
dtrain = xgb.DMatrix(train)
dtest = xgb.DMatrix(test)
param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
watchlist = [(dtest, "eval"), (dtrain, "train")]
num_round = 3
bst = xgb.train(param, dtrain, num_round, watchlist)
print("start testing prediction from first n trees")
    # predict using the first tree only
label = dtest.get_label()
ypred1 = bst.predict(dtest, iteration_range=(0, 1))
# by default, we predict using all the trees
ypred2 = bst.predict(dtest)
print("error of ypred1=%f" % (np.sum((ypred1 > 0.5) != label) / float(len(label))))
print("error of ypred2=%f" % (np.sum((ypred2 > 0.5) != label) / float(len(label))))
def sklearn_interface():
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
clf = xgb.XGBClassifier(n_estimators=3, max_depth=2, eta=1, use_label_encoder=False)
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)])
assert clf.n_classes_ == 2
print("start testing prediction from first n trees")
    # predict using the first tree only
ypred1 = clf.predict(X_test, iteration_range=(0, 1))
# by default, we predict using all the trees
ypred2 = clf.predict(X_test)
print(
"error of ypred1=%f" % (np.sum((ypred1 > 0.5) != y_test) / float(len(y_test)))
)
print(
"error of ypred2=%f" % (np.sum((ypred2 > 0.5) != y_test) / float(len(y_test)))
)
if __name__ == "__main__":
native_interface()
sklearn_interface()
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/predict_first_ntree.py |
'''
Demo for using and defining callback functions.
.. versionadded:: 1.3.0
'''
import xgboost as xgb
import tempfile
import os
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import argparse
class Plotting(xgb.callback.TrainingCallback):
    '''Plot evaluation results during training. Only for demonstration purposes,
    as it's quite slow to draw.
'''
def __init__(self, rounds):
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.rounds = rounds
self.lines = {}
self.fig.show()
self.x = np.linspace(0, self.rounds, self.rounds)
plt.ion()
def _get_key(self, data, metric):
return f'{data}-{metric}'
def after_iteration(self, model, epoch, evals_log):
'''Update the plot.'''
if not self.lines:
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
self.lines[key], = self.ax.plot(self.x, expanded, label=key)
self.ax.legend()
else:
# https://pythonspot.com/matplotlib-update-plot/
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
self.lines[key].set_ydata(expanded)
self.fig.canvas.draw()
# False to indicate training should not stop.
return False
def custom_callback():
'''Demo for defining a custom callback function that plots evaluation result during
training.'''
X, y = load_breast_cancer(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
D_train = xgb.DMatrix(X_train, y_train)
D_valid = xgb.DMatrix(X_valid, y_valid)
num_boost_round = 100
plotting = Plotting(num_boost_round)
# Pass it to the `callbacks` parameter as a list.
xgb.train(
{
'objective': 'binary:logistic',
'eval_metric': ['error', 'rmse'],
'tree_method': 'gpu_hist'
},
D_train,
evals=[(D_train, 'Train'), (D_valid, 'Valid')],
num_boost_round=num_boost_round,
callbacks=[plotting])
def check_point_callback():
# only for demo, set a larger value (like 100) in practice as checkpointing is quite
# slow.
rounds = 2
def check(as_pickle):
for i in range(0, 10, rounds):
if i == 0:
continue
if as_pickle:
path = os.path.join(tmpdir, 'model_' + str(i) + '.pkl')
else:
path = os.path.join(tmpdir, 'model_' + str(i) + '.json')
assert(os.path.exists(path))
X, y = load_breast_cancer(return_X_y=True)
m = xgb.DMatrix(X, y)
# Check point to a temporary directory for demo
with tempfile.TemporaryDirectory() as tmpdir:
# Use callback class from xgboost.callback
# Feel free to subclass/customize it to suit your need.
check_point = xgb.callback.TrainingCheckPoint(directory=tmpdir,
iterations=rounds,
name='model')
xgb.train({'objective': 'binary:logistic'}, m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point])
check(False)
# This version of checkpoint saves everything including parameters and
# model. See: doc/tutorials/saving_model.rst
check_point = xgb.callback.TrainingCheckPoint(directory=tmpdir,
iterations=rounds,
as_pickle=True,
name='model')
xgb.train({'objective': 'binary:logistic'}, m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point])
check(True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--plot', default=1, type=int)
args = parser.parse_args()
check_point_callback()
if args.plot:
custom_callback()
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/callbacks.py |
##
# This script demonstrates how to access the eval metrics in xgboost
##
import os
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.train'))
dtest = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.test'))
param = [('max_depth', 2), ('objective', 'binary:logistic'), ('eval_metric', 'logloss'), ('eval_metric', 'error')]
num_round = 2
watchlist = [(dtest,'eval'), (dtrain,'train')]
evals_result = {}
bst = xgb.train(param, dtrain, num_round, watchlist, evals_result=evals_result)
print('Access logloss metric directly from evals_result:')
print(evals_result['eval']['logloss'])
print('')
print('Access metrics through a loop:')
for e_name, e_mtrs in evals_result.items():
print('- {}'.format(e_name))
for e_mtr_name, e_mtr_vals in e_mtrs.items():
print(' - {}'.format(e_mtr_name))
print(' - {}'.format(e_mtr_vals))
print('')
print('Access complete dictionary:')
print(evals_result)
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/evals_result.py |
import os
import xgboost as xgb
# load data and do training
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.train'))
dtest = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.test'))
param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 3
bst = xgb.train(param, dtrain, num_round, watchlist)
print('start testing predicting the leaf indices')
# predict using the first 2 trees
leafindex = bst.predict(dtest, ntree_limit=2, pred_leaf=True)
print(leafindex.shape)
print(leafindex)
# predict all trees
leafindex = bst.predict(dtest, pred_leaf=True)
print(leafindex.shape)
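# Added note (not in the original script): with pred_leaf=True each row of
# leafindex corresponds to one test sample and each column to one tree, so
# entry [i, j] is the index of the leaf that sample i falls into in tree j and
# the shape is (n_samples, n_trees_used).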
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/predict_leaf_indices.py |
##
# This script demonstrates how to access the xgboost eval metrics by using sklearn
##
import xgboost as xgb
import numpy as np
from sklearn.datasets import make_hastie_10_2
X, y = make_hastie_10_2(n_samples=2000, random_state=42)
# Map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:1600], X[1600:]
y_train, y_test = y[:1600], y[1600:]
param_dist = {'objective':'binary:logistic', 'n_estimators':2}
clf = xgb.XGBModel(**param_dist)
# Or you can use: clf = xgb.XGBClassifier(**param_dist)
clf.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_test, y_test)],
eval_metric='logloss',
verbose=True)
# Load evals result by calling the evals_result() function
evals_result = clf.evals_result()
print('Access logloss metric directly from validation_0:')
print(evals_result['validation_0']['logloss'])
print('')
print('Access metrics through a loop:')
for e_name, e_mtrs in evals_result.items():
print('- {}'.format(e_name))
for e_mtr_name, e_mtr_vals in e_mtrs.items():
print(' - {}'.format(e_mtr_name))
print(' - {}'.format(e_mtr_vals))
print('')
print('Access complete dict:')
print(evals_result)
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/sklearn_evals_result.py |
import os
import xgboost as xgb
##
# this script demonstrates how to fit a generalized linear model in xgboost
# basically, we are using a linear model instead of trees as our boosters
##
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.train'))
dtest = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.test'))
# change booster to gblinear, so that we are fitting a linear model
# alpha is the L1 regularizer
# lambda is the L2 regularizer
# you can also set lambda_bias which is L2 regularizer on the bias term
param = {'objective':'binary:logistic', 'booster':'gblinear',
'alpha': 0.0001, 'lambda': 1}
# normally, you do not need to set eta (step_size)
# XGBoost uses a parallel coordinate descent algorithm (shotgun), so
# parallelization can affect convergence in certain cases
# setting eta to a smaller value, e.g. 0.5, can make the optimization more stable
# param['eta'] = 1
##
# the rest of settings are the same
##
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 4
bst = xgb.train(param, dtrain, num_round, watchlist)
preds = bst.predict(dtest)
labels = dtest.get_label()
print('error=%f' % (sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds))))
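# Added illustration (not part of the original demo): for a gblinear booster
# the text model dump contains the bias and per-feature weights, which is a
# quick way to inspect the fitted linear model.  The dump format is not a
# stable API, so treat this as a hedged sketch only.
print(bst.get_dump()[0][:300])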
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/generalized_linear_model.py |
import os
import numpy as np
import xgboost as xgb
# load data and do training
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.train'))
param = {'max_depth':2, 'eta':1, 'objective':'binary:logistic'}
num_round = 2
print('running cross validation')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, num_round, nfold=5,
metrics={'error'}, seed=0,
callbacks=[xgb.callback.EvaluationMonitor(show_stdv=True)])
print('running cross validation, disable standard deviation display')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value
res = xgb.cv(param, dtrain, num_boost_round=10, nfold=5,
metrics={'error'}, seed=0,
callbacks=[xgb.callback.EvaluationMonitor(show_stdv=False),
xgb.callback.EarlyStopping(3)])
print(res)
print('running cross validation, with preprocessing function')
# define the preprocessing function
# it returns the preprocessed training data, test data, and parameters
# we can use this to do weight rescaling, etc.
# as an example, we try to set scale_pos_weight
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param['scale_pos_weight'] = ratio
return (dtrain, dtest, param)
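# Added note (not in the original script): the ratio computed above is
# (#negative examples) / (#positive examples), the usual heuristic value for
# scale_pos_weight on imbalanced binary data.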
# do cross validation, for each fold
# the dtrain, dtest, param will be passed into fpreproc
# then the return value of fpreproc will be used to generate
# results of that fold
xgb.cv(param, dtrain, num_round, nfold=5,
metrics={'auc'}, seed=0, fpreproc=fpreproc)
###
# you can also do cross validation with customized loss function
# See custom_objective.py
##
print('running cross validation, with customized loss function')
def logregobj(preds, dtrain):
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds))
grad = preds - labels
hess = preds * (1.0 - preds)
return grad, hess
def evalerror(preds, dtrain):
labels = dtrain.get_label()
return 'error', float(sum(labels != (preds > 0.0))) / len(labels)
param = {'max_depth':2, 'eta':1}
# train with customized objective
xgb.cv(param, dtrain, num_round, nfold=5, seed=0,
obj=logregobj, feval=evalerror)
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/cross_validation.py |
import numpy as np
import scipy.sparse
import pickle
import xgboost as xgb
import os
# Make sure the demo knows where to load the data.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
XGBOOST_ROOT_DIR = os.path.dirname(os.path.dirname(CURRENT_DIR))
DEMO_DIR = os.path.join(XGBOOST_ROOT_DIR, 'demo')
# simple example
# load data from a text file; a binary buffer generated by xgboost also works
dtrain = xgb.DMatrix(os.path.join(DEMO_DIR, 'data', 'agaricus.txt.train'))
dtest = xgb.DMatrix(os.path.join(DEMO_DIR, 'data', 'agaricus.txt.test'))
# specify parameters via map; definitions are the same as the C++ version
param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
# specify validations set to watch performance
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 2
bst = xgb.train(param, dtrain, num_round, watchlist)
# this is prediction
preds = bst.predict(dtest)
labels = dtest.get_label()
print('error=%f' %
(sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) /
float(len(preds))))
bst.save_model('0001.model')
# dump model
bst.dump_model('dump.raw.txt')
# dump model with feature map
bst.dump_model('dump.nice.txt', os.path.join(DEMO_DIR, 'data/featmap.txt'))
# save dmatrix into binary buffer
dtest.save_binary('dtest.buffer')
# save model
bst.save_model('xgb.model')
# load model and data in
bst2 = xgb.Booster(model_file='xgb.model')
dtest2 = xgb.DMatrix('dtest.buffer')
preds2 = bst2.predict(dtest2)
# assert they are the same
assert np.sum(np.abs(preds2 - preds)) == 0
# alternatively, you can pickle the booster
pks = pickle.dumps(bst2)
# load model and data in
bst3 = pickle.loads(pks)
preds3 = bst3.predict(dtest2)
# assert they are the same
assert np.sum(np.abs(preds3 - preds)) == 0
###
# build dmatrix from scipy.sparse
print('start running example of build DMatrix from scipy.sparse CSR Matrix')
labels = []
row = []
col = []
dat = []
i = 0
for l in open(os.path.join(DEMO_DIR, 'data', 'agaricus.txt.train')):
arr = l.split()
labels.append(int(arr[0]))
for it in arr[1:]:
k, v = it.split(':')
row.append(i)
col.append(int(k))
dat.append(float(v))
i += 1
csr = scipy.sparse.csr_matrix((dat, (row, col)))
dtrain = xgb.DMatrix(csr, label=labels)
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
bst = xgb.train(param, dtrain, num_round, watchlist)
print('start running example of build DMatrix from scipy.sparse CSC Matrix')
# we can also construct from csc matrix
csc = scipy.sparse.csc_matrix((dat, (row, col)))
dtrain = xgb.DMatrix(csc, label=labels)
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
bst = xgb.train(param, dtrain, num_round, watchlist)
print('start running example of build DMatrix from numpy array')
# NOTE: npymat is a numpy matrix; internally it will be converted into a
# scipy.sparse.csr_matrix and then into a DMatrix
npymat = csr.todense()
dtrain = xgb.DMatrix(npymat, label=labels)
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
bst = xgb.train(param, dtrain, num_round, watchlist)
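print('start running example of build DMatrix from pandas DataFrame')
# Added illustration (not in the original walkthrough): a DMatrix can also be
# constructed from a pandas DataFrame with numeric columns.  This extra step
# is hedged behind an ImportError check and is skipped if pandas is missing.
try:
    import pandas as pd
    df = pd.DataFrame(np.asarray(npymat),
                      columns=['feat_{}'.format(i) for i in range(npymat.shape[1])])
    dtrain = xgb.DMatrix(df, label=labels)
    watchlist = [(dtest, 'eval'), (dtrain, 'train')]
    bst = xgb.train(param, dtrain, num_round, watchlist)
except ImportError:
    print('pandas is not installed, skipping the DataFrame example')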
| spark-xgboost-nv-release_1.4.0 | demo/guide-python/basic_walkthrough.py |
import xgboost as xgb
from sklearn.datasets import fetch_covtype
from sklearn.model_selection import train_test_split
import time
# Fetch dataset using sklearn
cov = fetch_covtype()
X = cov.data
y = cov.target
# Create 0.75/0.25 train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, train_size=0.75,
random_state=42)
# Specify sufficient boosting iterations to reach a minimum
num_round = 3000
# Leave most parameters as default
param = {'objective': 'multi:softmax', # Specify multiclass classification
'num_class': 8, # Number of possible output classes
'tree_method': 'gpu_hist' # Use GPU accelerated algorithm
}
# Convert input data from numpy to XGBoost format
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
gpu_res = {} # Store accuracy result
tmp = time.time()
# Train model
xgb.train(param, dtrain, num_round, evals=[(dtest, 'test')], evals_result=gpu_res)
print("GPU Training Time: %s seconds" % (str(time.time() - tmp)))
# Repeat for CPU algorithm
tmp = time.time()
param['tree_method'] = 'hist'
cpu_res = {}
xgb.train(param, dtrain, num_round, evals=[(dtest, 'test')], evals_result=cpu_res)
print("CPU Training Time: %s seconds" % (str(time.time() - tmp)))
| spark-xgboost-nv-release_1.4.0 | demo/gpu_acceleration/cover_type.py |
#!/usr/bin/python
import xgboost as xgb
from sklearn.datasets import load_svmlight_file
# This script demonstrates how to do ranking with XGBRanker
x_train, y_train = load_svmlight_file("mq2008.train")
x_valid, y_valid = load_svmlight_file("mq2008.vali")
x_test, y_test = load_svmlight_file("mq2008.test")
group_train = []
with open("mq2008.train.group", "r") as f:
data = f.readlines()
for line in data:
group_train.append(int(line.split("\n")[0]))
group_valid = []
with open("mq2008.vali.group", "r") as f:
data = f.readlines()
for line in data:
group_valid.append(int(line.split("\n")[0]))
group_test = []
with open("mq2008.test.group", "r") as f:
data = f.readlines()
for line in data:
group_test.append(int(line.split("\n")[0]))
params = {'objective': 'rank:ndcg', 'learning_rate': 0.1,
'gamma': 1.0, 'min_child_weight': 0.1,
'max_depth': 6, 'n_estimators': 4}
model = xgb.sklearn.XGBRanker(**params)
model.fit(x_train, y_train, group_train, verbose=True,
eval_set=[(x_valid, y_valid)], eval_group=[group_valid])
pred = model.predict(x_test)
| spark-xgboost-nv-release_1.4.0 | demo/rank/rank_sklearn.py |
import sys
def save_data(group_data,output_feature,output_group):
if len(group_data) == 0:
return
output_group.write(str(len(group_data))+"\n")
for data in group_data:
# only include nonzero features
feats = [ p for p in data[2:] if float(p.split(':')[1]) != 0.0 ]
output_feature.write(data[0] + " " + " ".join(feats) + "\n")
if __name__ == "__main__":
if len(sys.argv) != 4:
print ("Usage: python trans_data.py [Ranksvm Format Input] [Output Feature File] [Output Group File]")
sys.exit(0)
fi = open(sys.argv[1])
output_feature = open(sys.argv[2],"w")
output_group = open(sys.argv[3],"w")
group_data = []
group = ""
for line in fi:
if not line:
break
if "#" in line:
line = line[:line.index("#")]
splits = line.strip().split(" ")
if splits[1] != group:
save_data(group_data,output_feature,output_group)
group_data = []
group = splits[1]
group_data.append(splits)
save_data(group_data,output_feature,output_group)
fi.close()
output_feature.close()
output_group.close()
| spark-xgboost-nv-release_1.4.0 | demo/rank/trans_data.py |
#!/usr/bin/python
import xgboost as xgb
from xgboost import DMatrix
from sklearn.datasets import load_svmlight_file
# This script demonstrates how to do ranking with xgboost.train
x_train, y_train = load_svmlight_file("mq2008.train")
x_valid, y_valid = load_svmlight_file("mq2008.vali")
x_test, y_test = load_svmlight_file("mq2008.test")
group_train = []
with open("mq2008.train.group", "r") as f:
data = f.readlines()
for line in data:
group_train.append(int(line.split("\n")[0]))
group_valid = []
with open("mq2008.vali.group", "r") as f:
data = f.readlines()
for line in data:
group_valid.append(int(line.split("\n")[0]))
group_test = []
with open("mq2008.test.group", "r") as f:
data = f.readlines()
for line in data:
group_test.append(int(line.split("\n")[0]))
train_dmatrix = DMatrix(x_train, y_train)
valid_dmatrix = DMatrix(x_valid, y_valid)
test_dmatrix = DMatrix(x_test)
train_dmatrix.set_group(group_train)
valid_dmatrix.set_group(group_valid)
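# Added note (not in the original script): each entry in a .group file is the
# number of consecutive rows that belong to one query, so the sum of the group
# sizes must equal the number of rows in the corresponding data file.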
params = {'objective': 'rank:ndcg', 'eta': 0.1, 'gamma': 1.0,
'min_child_weight': 0.1, 'max_depth': 6}
xgb_model = xgb.train(params, train_dmatrix, num_boost_round=4,
evals=[(valid_dmatrix, 'validation')])
pred = xgb_model.predict(test_dmatrix)
| spark-xgboost-nv-release_1.4.0 | demo/rank/rank.py |
import boto3
import json
lambda_client = boto3.client('lambda', region_name='us-west-2')
# Source code for the Lambda function is available at https://github.com/hcho3/xgboost-devops
r = lambda_client.invoke(
FunctionName='XGBoostCICostWatcher',
InvocationType='RequestResponse',
Payload='{}'.encode('utf-8')
)
payload = r['Payload'].read().decode('utf-8')
if 'FunctionError' in r:
msg = 'Error when invoking the Lambda function. Stack trace:\n'
error = json.loads(payload)
msg += f" {error['errorType']}: {error['errorMessage']}\n"
for trace in error['stackTrace']:
for line in trace.split('\n'):
msg += f' {line}\n'
raise RuntimeError(msg)
response = json.loads(payload)
if response['approved']:
print(f"Testing approved. Reason: {response['reason']}")
else:
raise RuntimeError(f"Testing rejected. Reason: {response['reason']}")
| spark-xgboost-nv-release_1.4.0 | tests/jenkins_get_approval.py |
"""Run benchmark on the tree booster."""
import argparse
import ast
import time
import numpy as np
import xgboost as xgb
RNG = np.random.RandomState(1994)
def run_benchmark(args):
"""Runs the benchmark."""
try:
dtest = xgb.DMatrix('dtest.dm')
dtrain = xgb.DMatrix('dtrain.dm')
if not (dtest.num_col() == args.columns
and dtrain.num_col() == args.columns):
raise ValueError("Wrong cols")
if not (dtest.num_row() == args.rows * args.test_size
and dtrain.num_row() == args.rows * (1 - args.test_size)):
raise ValueError("Wrong rows")
    except Exception:
print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns))
print("{}/{} test/train split".format(args.test_size, 1.0 - args.test_size))
tmp = time.time()
X = RNG.rand(args.rows, args.columns)
y = RNG.randint(0, 2, args.rows)
if 0.0 < args.sparsity < 1.0:
X = np.array([[np.nan if RNG.uniform(0, 1) < args.sparsity else x for x in x_row]
for x_row in X])
train_rows = int(args.rows * (1.0 - args.test_size))
test_rows = int(args.rows * args.test_size)
X_train = X[:train_rows, :]
X_test = X[-test_rows:, :]
y_train = y[:train_rows]
y_test = y[-test_rows:]
print("Generate Time: %s seconds" % (str(time.time() - tmp)))
del X, y
tmp = time.time()
print("DMatrix Start")
dtrain = xgb.DMatrix(X_train, y_train, nthread=-1)
dtest = xgb.DMatrix(X_test, y_test, nthread=-1)
print("DMatrix Time: %s seconds" % (str(time.time() - tmp)))
del X_train, y_train, X_test, y_test
dtest.save_binary('dtest.dm')
dtrain.save_binary('dtrain.dm')
param = {'objective': 'binary:logistic'}
if args.params != '':
param.update(ast.literal_eval(args.params))
param['tree_method'] = args.tree_method
print("Training with '%s'" % param['tree_method'])
tmp = time.time()
xgb.train(param, dtrain, args.iterations, evals=[(dtest, "test")])
print("Train Time: %s seconds" % (str(time.time() - tmp)))
def main():
"""The main function.
Defines and parses command line arguments and calls the benchmark.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--tree_method', default='gpu_hist')
parser.add_argument('--sparsity', type=float, default=0.0)
parser.add_argument('--rows', type=int, default=1000000)
parser.add_argument('--columns', type=int, default=50)
parser.add_argument('--iterations', type=int, default=500)
parser.add_argument('--test_size', type=float, default=0.25)
parser.add_argument('--params', default='',
help='Provide additional parameters as a Python dict string, e.g. --params '
'\"{\'max_depth\':2}\"')
args = parser.parse_args()
run_benchmark(args)
if __name__ == '__main__':
main()
| spark-xgboost-nv-release_1.4.0 | tests/benchmark/benchmark_tree.py |
#pylint: skip-file
import argparse
import xgboost as xgb
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import time
import ast
rng = np.random.RandomState(1994)
def run_benchmark(args):
try:
dtest = xgb.DMatrix('dtest.dm')
dtrain = xgb.DMatrix('dtrain.dm')
if not (dtest.num_col() == args.columns \
and dtrain.num_col() == args.columns):
raise ValueError("Wrong cols")
if not (dtest.num_row() == args.rows * args.test_size \
and dtrain.num_row() == args.rows * (1-args.test_size)):
raise ValueError("Wrong rows")
    except Exception:
print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns))
print("{}/{} test/train split".format(args.test_size, 1.0 - args.test_size))
tmp = time.time()
X, y = make_classification(args.rows, n_features=args.columns, n_redundant=0, n_informative=args.columns, n_repeated=0, random_state=7)
if args.sparsity < 1.0:
X = np.array([[np.nan if rng.uniform(0, 1) < args.sparsity else x for x in x_row] for x_row in X])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size, random_state=7)
print ("Generate Time: %s seconds" % (str(time.time() - tmp)))
tmp = time.time()
print ("DMatrix Start")
dtrain = xgb.DMatrix(X_train, y_train)
dtest = xgb.DMatrix(X_test, y_test, nthread=-1)
print ("DMatrix Time: %s seconds" % (str(time.time() - tmp)))
dtest.save_binary('dtest.dm')
dtrain.save_binary('dtrain.dm')
param = {'objective': 'binary:logistic','booster':'gblinear'}
    if args.params != '':
param.update(ast.literal_eval(args.params))
param['updater'] = args.updater
print("Training with '%s'" % param['updater'])
tmp = time.time()
xgb.train(param, dtrain, args.iterations, evals=[(dtrain,"train")], early_stopping_rounds = args.columns)
print ("Train Time: %s seconds" % (str(time.time() - tmp)))
parser = argparse.ArgumentParser()
parser.add_argument('--updater', default='coord_descent')
parser.add_argument('--sparsity', type=float, default=0.0)
parser.add_argument('--lambda', type=float, default=1.0)
parser.add_argument('--tol', type=float, default=1e-5)
parser.add_argument('--alpha', type=float, default=1.0)
parser.add_argument('--rows', type=int, default=1000000)
parser.add_argument('--iterations', type=int, default=10000)
parser.add_argument('--columns', type=int, default=50)
parser.add_argument('--test_size', type=float, default=0.25)
parser.add_argument('--standardise', type=bool, default=False)
parser.add_argument('--params', default='', help='Provide additional parameters as a Python dict string, e.g. --params \"{\'max_depth\':2}\"')
args = parser.parse_args()
run_benchmark(args)
| spark-xgboost-nv-release_1.4.0 | tests/benchmark/benchmark_linear.py |
"""Generate synthetic data in LibSVM format."""
import argparse
import io
import time
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
RNG = np.random.RandomState(2019)
def generate_data(args):
"""Generates the data."""
print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns))
print("Sparsity {}".format(args.sparsity))
print("{}/{} train/test split".format(1.0 - args.test_size, args.test_size))
tmp = time.time()
n_informative = args.columns * 7 // 10
n_redundant = args.columns // 10
n_repeated = args.columns // 10
print("n_informative: {}, n_redundant: {}, n_repeated: {}".format(n_informative, n_redundant,
n_repeated))
x, y = make_classification(n_samples=args.rows, n_features=args.columns,
n_informative=n_informative, n_redundant=n_redundant,
n_repeated=n_repeated, shuffle=False, random_state=RNG)
print("Generate Time: {} seconds".format(time.time() - tmp))
tmp = time.time()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=args.test_size,
random_state=RNG, shuffle=False)
print("Train/Test Split Time: {} seconds".format(time.time() - tmp))
tmp = time.time()
write_file('train.libsvm', x_train, y_train, args.sparsity)
print("Write Train Time: {} seconds".format(time.time() - tmp))
tmp = time.time()
write_file('test.libsvm', x_test, y_test, args.sparsity)
print("Write Test Time: {} seconds".format(time.time() - tmp))
def write_file(filename, x_data, y_data, sparsity):
with open(filename, 'w') as f:
for x, y in zip(x_data, y_data):
write_line(f, x, y, sparsity)
def write_line(f, x, y, sparsity):
with io.StringIO() as line:
line.write(str(y))
for i, col in enumerate(x):
if 0.0 < sparsity < 1.0:
if RNG.uniform(0, 1) > sparsity:
write_feature(line, i, col)
else:
write_feature(line, i, col)
line.write('\n')
f.write(line.getvalue())
def write_feature(line, index, feature):
line.write(' ')
line.write(str(index))
line.write(':')
line.write(str(feature))
def main():
"""The main function.
Defines and parses command line arguments and calls the generator.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--rows', type=int, default=1000000)
parser.add_argument('--columns', type=int, default=50)
parser.add_argument('--sparsity', type=float, default=0.0)
parser.add_argument('--test_size', type=float, default=0.01)
args = parser.parse_args()
generate_data(args)
if __name__ == '__main__':
main()
| spark-xgboost-nv-release_1.4.0 | tests/benchmark/generate_libsvm.py |
import sys
import re
import zipfile
import glob
if len(sys.argv) != 2:
print('Usage: {} [wheel]'.format(sys.argv[0]))
sys.exit(1)
vcomp140_path = 'C:\\Windows\\System32\\vcomp140.dll'
for wheel_path in sorted(glob.glob(sys.argv[1])):
m = re.search(r'xgboost-(.*)-py3', wheel_path)
assert m, f'wheel_path = {wheel_path}'
version = m.group(1)
with zipfile.ZipFile(wheel_path, 'a') as f:
f.write(vcomp140_path, 'xgboost-{}.data/data/xgboost/vcomp140.dll'.format(version))
| spark-xgboost-nv-release_1.4.0 | tests/ci_build/insert_vcomp140.py |
import sys
import os
from contextlib import contextmanager
@contextmanager
def cd(path):
path = os.path.normpath(path)
cwd = os.getcwd()
os.chdir(path)
print("cd " + path)
try:
yield path
finally:
os.chdir(cwd)
if len(sys.argv) != 4:
print('Usage: {} [wheel to rename] [commit id] [platform tag]'.format(sys.argv[0]))
sys.exit(1)
whl_path = sys.argv[1]
commit_id = sys.argv[2]
platform_tag = sys.argv[3]
dirname, basename = os.path.dirname(whl_path), os.path.basename(whl_path)
with cd(dirname):
tokens = basename.split('-')
assert len(tokens) == 5
version = tokens[1].split('+')[0]
keywords = {'pkg_name': tokens[0],
'version': version,
'commit_id': commit_id,
'platform_tag': platform_tag}
new_name = '{pkg_name}-{version}+{commit_id}-py3-none-{platform_tag}.whl'.format(**keywords)
print('Renaming {} to {}...'.format(basename, new_name))
os.rename(basename, new_name)
| spark-xgboost-nv-release_1.4.0 | tests/ci_build/rename_whl.py |
import argparse
import os
import subprocess
ROOT = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir,
os.path.pardir))
r_package = os.path.join(ROOT, 'R-package')
class DirectoryExcursion:
def __init__(self, path: os.PathLike):
self.path = path
self.curdir = os.path.normpath(os.path.abspath(os.path.curdir))
def __enter__(self):
os.chdir(self.path)
def __exit__(self, *args):
os.chdir(self.curdir)
def get_mingw_bin():
return os.path.join('c:/rtools40/mingw64/', 'bin')
def test_with_autotools(args):
with DirectoryExcursion(r_package):
mingw_bin = get_mingw_bin()
CXX = os.path.join(mingw_bin, 'g++.exe')
CC = os.path.join(mingw_bin, 'gcc.exe')
cmd = ['R.exe', 'CMD', 'INSTALL', str(os.path.curdir)]
env = os.environ.copy()
env.update({'CC': CC, 'CXX': CXX})
subprocess.check_call(cmd, env=env)
subprocess.check_call([
'R.exe', '-q', '-e',
"library(testthat); setwd('tests'); source('testthat.R')"
])
subprocess.check_call([
'R.exe', '-q', '-e',
"demo(runall, package = 'xgboost')"
])
def test_with_cmake(args):
os.mkdir('build')
with DirectoryExcursion('build'):
if args.compiler == 'mingw':
mingw_bin = get_mingw_bin()
CXX = os.path.join(mingw_bin, 'g++.exe')
CC = os.path.join(mingw_bin, 'gcc.exe')
env = os.environ.copy()
env.update({'CC': CC, 'CXX': CXX})
subprocess.check_call([
'cmake', os.path.pardir, '-DUSE_OPENMP=ON', '-DR_LIB=ON',
'-DCMAKE_CONFIGURATION_TYPES=Release', '-G', 'Unix Makefiles',
],
env=env)
subprocess.check_call(['make', '-j', 'install'])
elif args.compiler == 'msvc':
subprocess.check_call([
'cmake', os.path.pardir, '-DUSE_OPENMP=ON', '-DR_LIB=ON',
'-DCMAKE_CONFIGURATION_TYPES=Release', '-A', 'x64'
])
subprocess.check_call([
'cmake', '--build', os.path.curdir, '--target', 'install',
'--config', 'Release'
])
else:
raise ValueError('Wrong compiler')
with DirectoryExcursion(r_package):
subprocess.check_call([
'R.exe', '-q', '-e',
"library(testthat); setwd('tests'); source('testthat.R')"
])
subprocess.check_call([
'R.exe', '-q', '-e',
"demo(runall, package = 'xgboost')"
])
def main(args):
if args.build_tool == 'autotools':
test_with_autotools(args)
else:
test_with_cmake(args)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--compiler',
type=str,
choices=['mingw', 'msvc'],
help='Compiler used for compiling CXX code.')
parser.add_argument(
'--build-tool',
type=str,
choices=['cmake', 'autotools'],
help='Build tool for compiling CXX code and install R package.')
args = parser.parse_args()
main(args)
| spark-xgboost-nv-release_1.4.0 | tests/ci_build/test_r_package.py |
#!/usr/bin/env python
import subprocess
import yaml
import json
from multiprocessing import Pool, cpu_count
import shutil
import os
import sys
import re
import argparse
from time import time
def call(args):
'''Subprocess run wrapper.'''
completed = subprocess.run(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error_msg = completed.stdout.decode('utf-8')
# `workspace` is a name used in Jenkins CI. Normally we should keep the
# dir as `xgboost`.
matched = re.search('(workspace|xgboost)/.*(src|tests|include)/.*warning:',
error_msg,
re.MULTILINE)
if matched is None:
return_code = 0
else:
return_code = 1
return (completed.returncode, return_code, error_msg)
class ClangTidy(object):
''' clang tidy wrapper.
Args:
args: Command line arguments.
cpp_lint: Run linter on C++ source code.
cuda_lint: Run linter on CUDA source code.
use_dmlc_gtest: Whether to use gtest bundled in dmlc-core.
'''
def __init__(self, args):
self.cpp_lint = args.cpp
self.cuda_lint = args.cuda
self.use_dmlc_gtest = args.use_dmlc_gtest
if args.tidy_version:
self.exe = 'clang-tidy-' + str(args.tidy_version)
else:
self.exe = 'clang-tidy'
print('Run linter on CUDA: ', self.cuda_lint)
print('Run linter on C++:', self.cpp_lint)
print('Use dmlc gtest:', self.use_dmlc_gtest)
if not self.cpp_lint and not self.cuda_lint:
raise ValueError('Both --cpp and --cuda are set to 0.')
self.root_path = os.path.abspath(os.path.curdir)
print('Project root:', self.root_path)
self.cdb_path = os.path.join(self.root_path, 'cdb')
def __enter__(self):
self.start = time()
if os.path.exists(self.cdb_path):
shutil.rmtree(self.cdb_path)
self._generate_cdb()
return self
def __exit__(self, *args):
if os.path.exists(self.cdb_path):
shutil.rmtree(self.cdb_path)
self.end = time()
print('Finish running clang-tidy:', self.end - self.start)
def _generate_cdb(self):
'''Run CMake to generate compilation database.'''
os.mkdir(self.cdb_path)
os.chdir(self.cdb_path)
cmake_args = ['cmake', '..', '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
'-DGOOGLE_TEST=ON']
if self.use_dmlc_gtest:
cmake_args.append('-DUSE_DMLC_GTEST=ON')
else:
cmake_args.append('-DUSE_DMLC_GTEST=OFF')
if self.cuda_lint:
cmake_args.extend(['-DUSE_CUDA=ON', '-DUSE_NCCL=ON'])
subprocess.run(cmake_args)
os.chdir(self.root_path)
def convert_nvcc_command_to_clang(self, command):
'''Convert nvcc flags to corresponding clang flags.'''
components = command.split()
compiler: str = components[0]
if compiler.find('nvcc') != -1:
compiler = 'clang++'
components[0] = compiler
# check each component in a command
converted_components = [compiler]
for i in range(1, len(components)):
if components[i] == '-lineinfo':
continue
elif components[i] == '-fuse-ld=gold':
continue
elif components[i] == '-rdynamic':
continue
elif (components[i] == '-x' and
components[i+1] == 'cu'):
# -x cu -> -x cuda
converted_components.append('-x')
converted_components.append('cuda')
components[i+1] = ''
continue
elif components[i].find('-Xcompiler') != -1:
continue
elif components[i].find('--expt') != -1:
continue
elif components[i].find('-ccbin') != -1:
continue
elif components[i].find('--generate-code') != -1:
keyword = 'code=sm'
pos = components[i].find(keyword)
capability = components[i][pos + len(keyword) + 1:
pos + len(keyword) + 3]
if pos != -1:
converted_components.append(
'--cuda-gpu-arch=sm_' + capability)
elif components[i].find('--std=c++14') != -1:
converted_components.append('-std=c++14')
elif components[i].startswith('-isystem='):
converted_components.extend(components[i].split('='))
else:
converted_components.append(components[i])
converted_components.append('-isystem /usr/local/cuda/include/')
command = ''
for c in converted_components:
command = command + ' ' + c
command = command.strip()
return command
def _configure_flags(self, path, command):
src = os.path.join(self.root_path, 'src')
src = src.replace('/', '\\/')
include = os.path.join(self.root_path, 'include')
include = include.replace('/', '\\/')
header_filter = '(' + src + '|' + include + ')'
common_args = [self.exe,
"-header-filter=" + header_filter,
'-config='+self.clang_tidy]
common_args.append(path)
common_args.append('--')
command = self.convert_nvcc_command_to_clang(command)
command = command.split()[1:] # remove clang/c++/g++
if '-c' in command:
index = command.index('-c')
del command[index+1]
command.remove('-c')
if '-o' in command:
index = command.index('-o')
del command[index+1]
command.remove('-o')
common_args.extend(command)
# Two passes, one for device code another for host code.
if path.endswith('cu'):
args = [common_args.copy(), common_args.copy()]
args[0].append('--cuda-host-only')
args[1].append('--cuda-device-only')
else:
args = [common_args.copy()]
for a in args:
a.append('-Wno-unused-command-line-argument')
return args
def _configure(self):
'''Load and configure compile_commands and clang_tidy.'''
def should_lint(path):
if not self.cpp_lint and path.endswith('.cc'):
return False
isxgb = path.find('rabit') == -1
isxgb = isxgb and path.find('dmlc-core') == -1
isxgb = isxgb and (not path.startswith(self.cdb_path))
if isxgb:
print(path)
return True
cdb_file = os.path.join(self.cdb_path, 'compile_commands.json')
with open(cdb_file, 'r') as fd:
self.compile_commands = json.load(fd)
tidy_file = os.path.join(self.root_path, '.clang-tidy')
with open(tidy_file) as fd:
self.clang_tidy = yaml.safe_load(fd)
self.clang_tidy = str(self.clang_tidy)
all_files = []
for entry in self.compile_commands:
path = entry['file']
if should_lint(path):
args = self._configure_flags(path, entry['command'])
all_files.extend(args)
return all_files
def run(self):
'''Run clang-tidy.'''
all_files = self._configure()
passed = True
BAR = '-'*32
with Pool(cpu_count()) as pool:
results = pool.map(call, all_files)
for i, (process_status, tidy_status, msg) in enumerate(results):
            # Don't enforce clang-tidy to pass for now, because the namespace
            # for cub in thrust is not correct.
if tidy_status == 1:
passed = False
print(BAR, '\n'
'Process return code:', process_status, ', ',
'Tidy result code:', tidy_status, ', ',
'Message:\n', msg,
BAR, '\n')
if not passed:
print('Errors in `thrust` namespace can be safely ignored.',
'Please address rest of the clang-tidy warnings.')
return passed
def test_tidy(args):
    '''See if clang-tidy and our regex are working correctly. There are
    many subtleties we need to be careful about. For instance:
* Is the string re-directed to pipe encoded as UTF-8? or is it
bytes?
* On Jenkins there's no 'xgboost' directory, are we catching the
right keywords?
* Should we use re.DOTALL?
* Should we use re.MULTILINE?
Tests here are not thorough, at least we want to guarantee tidy is
not missing anything on Jenkins.
'''
root_path = os.path.abspath(os.path.curdir)
tidy_file = os.path.join(root_path, '.clang-tidy')
test_file_path = os.path.join(root_path,
'tests', 'ci_build', 'test_tidy.cc')
with open(tidy_file) as fd:
tidy_config = fd.read()
tidy_config = str(tidy_config)
tidy_config = '-config='+tidy_config
if not args.tidy_version:
tidy = 'clang-tidy'
else:
tidy = 'clang-tidy-' + str(args.tidy_version)
args = [tidy, tidy_config, test_file_path]
(proc_code, tidy_status, error_msg) = call(args)
assert proc_code == 0
assert tidy_status == 1
print('clang-tidy is working.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run clang-tidy.')
parser.add_argument('--cpp', type=int, default=1)
parser.add_argument('--tidy-version', type=int, default=None,
help='Specify the version of preferred clang-tidy.')
parser.add_argument('--cuda', type=int, default=1)
parser.add_argument('--use-dmlc-gtest', type=int, default=1,
help='Whether to use gtest bundled in dmlc-core.')
args = parser.parse_args()
test_tidy(args)
with ClangTidy(args) as linter:
passed = linter.run()
if not passed:
sys.exit(1)
| spark-xgboost-nv-release_1.4.0 | tests/ci_build/tidy.py |
import xgboost as xgb
import testing as tm
import numpy as np
import pytest
import os
rng = np.random.RandomState(1337)
class TestTrainingContinuation:
num_parallel_tree = 3
def generate_parameters(self):
xgb_params_01_binary = {
'nthread': 1,
}
xgb_params_02_binary = {
'nthread': 1,
'num_parallel_tree': self.num_parallel_tree
}
xgb_params_03_binary = {
'nthread': 1,
'num_class': 5,
'num_parallel_tree': self.num_parallel_tree
}
return [
xgb_params_01_binary, xgb_params_02_binary, xgb_params_03_binary
]
def run_training_continuation(self, xgb_params_01, xgb_params_02,
xgb_params_03):
from sklearn.datasets import load_digits
from sklearn.metrics import mean_squared_error
digits_2class = load_digits(n_class=2)
digits_5class = load_digits(n_class=5)
X_2class = digits_2class['data']
y_2class = digits_2class['target']
X_5class = digits_5class['data']
y_5class = digits_5class['target']
dtrain_2class = xgb.DMatrix(X_2class, label=y_2class)
dtrain_5class = xgb.DMatrix(X_5class, label=y_5class)
gbdt_01 = xgb.train(xgb_params_01, dtrain_2class,
num_boost_round=10)
ntrees_01 = len(gbdt_01.get_dump())
assert ntrees_01 == 10
gbdt_02 = xgb.train(xgb_params_01, dtrain_2class,
num_boost_round=0)
gbdt_02.save_model('xgb_tc.model')
gbdt_02a = xgb.train(xgb_params_01, dtrain_2class,
num_boost_round=10, xgb_model=gbdt_02)
gbdt_02b = xgb.train(xgb_params_01, dtrain_2class,
num_boost_round=10, xgb_model="xgb_tc.model")
ntrees_02a = len(gbdt_02a.get_dump())
ntrees_02b = len(gbdt_02b.get_dump())
assert ntrees_02a == 10
assert ntrees_02b == 10
res1 = mean_squared_error(y_2class, gbdt_01.predict(dtrain_2class))
res2 = mean_squared_error(y_2class, gbdt_02a.predict(dtrain_2class))
assert res1 == res2
res1 = mean_squared_error(y_2class, gbdt_01.predict(dtrain_2class))
res2 = mean_squared_error(y_2class, gbdt_02b.predict(dtrain_2class))
assert res1 == res2
gbdt_03 = xgb.train(xgb_params_01, dtrain_2class,
num_boost_round=3)
gbdt_03.save_model('xgb_tc.model')
gbdt_03a = xgb.train(xgb_params_01, dtrain_2class,
num_boost_round=7, xgb_model=gbdt_03)
gbdt_03b = xgb.train(xgb_params_01, dtrain_2class,
num_boost_round=7, xgb_model="xgb_tc.model")
ntrees_03a = len(gbdt_03a.get_dump())
ntrees_03b = len(gbdt_03b.get_dump())
assert ntrees_03a == 10
assert ntrees_03b == 10
os.remove('xgb_tc.model')
res1 = mean_squared_error(y_2class, gbdt_03a.predict(dtrain_2class))
res2 = mean_squared_error(y_2class, gbdt_03b.predict(dtrain_2class))
assert res1 == res2
gbdt_04 = xgb.train(xgb_params_02, dtrain_2class,
num_boost_round=3)
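# With forests enabled, each boosting round adds num_parallel_tree trees,
# so best_ntree_limit is expected to equal
# (best_iteration + 1) * self.num_parallel_tree.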
assert gbdt_04.best_ntree_limit == (gbdt_04.best_iteration +
1) * self.num_parallel_tree
res1 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class))
res2 = mean_squared_error(y_2class,
gbdt_04.predict(
dtrain_2class,
ntree_limit=gbdt_04.best_ntree_limit))
assert res1 == res2
gbdt_04 = xgb.train(xgb_params_02, dtrain_2class,
num_boost_round=7, xgb_model=gbdt_04)
assert gbdt_04.best_ntree_limit == (
gbdt_04.best_iteration + 1) * self.num_parallel_tree
res1 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class))
res2 = mean_squared_error(y_2class,
gbdt_04.predict(
dtrain_2class,
ntree_limit=gbdt_04.best_ntree_limit))
assert res1 == res2
gbdt_05 = xgb.train(xgb_params_03, dtrain_5class,
num_boost_round=7)
assert gbdt_05.best_ntree_limit == (
gbdt_05.best_iteration + 1) * self.num_parallel_tree
gbdt_05 = xgb.train(xgb_params_03,
dtrain_5class,
num_boost_round=3,
xgb_model=gbdt_05)
assert gbdt_05.best_ntree_limit == (
gbdt_05.best_iteration + 1) * self.num_parallel_tree
res1 = gbdt_05.predict(dtrain_5class)
res2 = gbdt_05.predict(dtrain_5class,
ntree_limit=gbdt_05.best_ntree_limit)
np.testing.assert_almost_equal(res1, res2)
@pytest.mark.skipif(**tm.no_sklearn())
def test_training_continuation_json(self):
params = self.generate_parameters()
self.run_training_continuation(params[0], params[1], params[2])
@pytest.mark.skipif(**tm.no_sklearn())
def test_training_continuation_updaters_json(self):
# Picked up from R tests.
updaters = 'grow_colmaker,prune,refresh'
params = self.generate_parameters()
for p in params:
p['updater'] = updaters
self.run_training_continuation(params[0], params[1], params[2])
| spark-xgboost-nv-release_1.4.0 | tests/python/test_training_continuation.py |
# -*- coding: utf-8 -*-
import numpy as np
import os
import xgboost as xgb
import pytest
import json
from pathlib import Path
import tempfile
import testing as tm
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestBasic:
def test_compat(self):
from xgboost.compat import lazy_isinstance
a = np.array([1, 2, 3])
assert lazy_isinstance(a, 'numpy', 'ndarray')
assert not lazy_isinstance(a, 'numpy', 'dataframe')
def test_basic(self):
dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
param = {'max_depth': 2, 'eta': 1,
'objective': 'binary:logistic'}
# specify validation set to watch performance
watchlist = [(dtrain, 'train')]
num_round = 2
bst = xgb.train(param, dtrain, num_round, watchlist, verbose_eval=True)
preds = bst.predict(dtrain)
labels = dtrain.get_label()
err = sum(1 for i in range(len(preds))
if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
# error must be smaller than 10%
assert err < 0.1
preds = bst.predict(dtest)
labels = dtest.get_label()
err = sum(1 for i in range(len(preds))
if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
# error must be smaller than 10%
assert err < 0.1
with tempfile.TemporaryDirectory() as tmpdir:
dtest_path = os.path.join(tmpdir, 'dtest.dmatrix')
# save dmatrix into binary buffer
dtest.save_binary(dtest_path)
# save model
model_path = os.path.join(tmpdir, 'model.booster')
bst.save_model(model_path)
# load model and data in
bst2 = xgb.Booster(model_file=model_path)
dtest2 = xgb.DMatrix(dtest_path)
preds2 = bst2.predict(dtest2)
# assert they are the same
assert np.sum(np.abs(preds2 - preds)) == 0
def test_metric_config(self):
# Make sure that the metric configuration happens in booster so the
# string `['error', 'auc']` doesn't get passed down to core.
dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
param = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': ['error', 'auc']}
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 2
booster = xgb.train(param, dtrain, num_round, watchlist)
predt_0 = booster.predict(dtrain)
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, 'model.json')
booster.save_model(path)
booster = xgb.Booster(params=param, model_file=path)
predt_1 = booster.predict(dtrain)
np.testing.assert_allclose(predt_0, predt_1)
def test_record_results(self):
dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
param = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': 'error'}
# specify validation set to watch performance
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 2
result = {}
res2 = {}
xgb.train(param, dtrain, num_round, watchlist,
callbacks=[xgb.callback.record_evaluation(result)])
xgb.train(param, dtrain, num_round, watchlist,
evals_result=res2)
assert result['train']['error'][0] < 0.1
assert res2 == result
def test_multiclass(self):
dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
param = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'num_class': 2}
# specify validation set to watch performance
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 2
bst = xgb.train(param, dtrain, num_round, watchlist)
# this is prediction
preds = bst.predict(dtest)
labels = dtest.get_label()
err = sum(1 for i in range(len(preds))
if preds[i] != labels[i]) / float(len(preds))
# error must be smaller than 10%
assert err < 0.1
with tempfile.TemporaryDirectory() as tmpdir:
dtest_path = os.path.join(tmpdir, 'dtest.buffer')
model_path = os.path.join(tmpdir, 'xgb.model')
# save dmatrix into binary buffer
dtest.save_binary(dtest_path)
# save model
bst.save_model(model_path)
# load model and data in
bst2 = xgb.Booster(model_file=model_path)
dtest2 = xgb.DMatrix(dtest_path)
preds2 = bst2.predict(dtest2)
# assert they are the same
assert np.sum(np.abs(preds2 - preds)) == 0
def test_dump(self):
data = np.random.randn(100, 2)
target = np.array([0, 1] * 50)
features = ['Feature1', 'Feature2']
dm = xgb.DMatrix(data, label=target, feature_names=features)
params = {'objective': 'binary:logistic',
'eval_metric': 'logloss',
'eta': 0.3,
'max_depth': 1}
bst = xgb.train(params, dm, num_boost_round=1)
# number of feature importances should == number of features
dump1 = bst.get_dump()
assert len(dump1) == 1, 'Expected only 1 tree to be dumped.'
assert len(dump1[0].splitlines()) == 3, 'Expected 1 root and 2 leaves - 3 lines in dump.'
dump2 = bst.get_dump(with_stats=True)
assert dump2[0].count('\n') == 3, 'Expected 1 root and 2 leaves - 3 lines in dump.'
msg = 'Expected more info when with_stats=True is given.'
assert dump2[0].find('\n') > dump1[0].find('\n'), msg
dump3 = bst.get_dump(dump_format="json")
dump3j = json.loads(dump3[0])
assert dump3j['nodeid'] == 0, 'Expected the root node on top.'
dump4 = bst.get_dump(dump_format="json", with_stats=True)
dump4j = json.loads(dump4[0])
assert 'gain' in dump4j, "Expected 'gain' to be dumped in JSON."
def test_load_file_invalid(self):
with pytest.raises(xgb.core.XGBoostError):
xgb.Booster(model_file='incorrect_path')
with pytest.raises(xgb.core.XGBoostError):
xgb.Booster(model_file=u'不正なパス')
def test_dmatrix_numpy_init_omp(self):
rows = [1000, 11326, 15000]
cols = 50
for row in rows:
X = np.random.randn(row, cols)
y = np.random.randn(row).astype('f')
dm = xgb.DMatrix(X, y, nthread=0)
np.testing.assert_array_equal(dm.get_label(), y)
assert dm.num_row() == row
assert dm.num_col() == cols
dm = xgb.DMatrix(X, y, nthread=10)
np.testing.assert_array_equal(dm.get_label(), y)
assert dm.num_row() == row
assert dm.num_col() == cols
def test_cv(self):
dm = xgb.DMatrix(dpath + 'agaricus.txt.train')
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic'}
# with as_pandas=False, xgb.cv returns a dict of evaluation history
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=False)
assert isinstance(cv, dict)
assert len(cv) == (4)
def test_cv_no_shuffle(self):
dm = xgb.DMatrix(dpath + 'agaricus.txt.train')
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic'}
# with as_pandas=False, xgb.cv returns a dict of evaluation history
cv = xgb.cv(params, dm, num_boost_round=10, shuffle=False, nfold=10,
as_pandas=False)
assert isinstance(cv, dict)
assert len(cv) == (4)
def test_cv_explicit_fold_indices(self):
dm = xgb.DMatrix(dpath + 'agaricus.txt.train')
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective':
'binary:logistic'}
folds = [
# Train Test
([1, 3], [5, 8]),
([7, 9], [23, 43]),
]
# with as_pandas=False, xgb.cv returns a dict of evaluation history
cv = xgb.cv(params, dm, num_boost_round=10, folds=folds,
as_pandas=False)
assert isinstance(cv, dict)
assert len(cv) == (4)
def test_cv_explicit_fold_indices_labels(self):
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective':
'reg:squarederror'}
N = 100
F = 3
dm = xgb.DMatrix(data=np.random.randn(N, F), label=np.arange(N))
folds = [
# Train Test
([1, 3], [5, 8]),
([7, 9], [23, 43, 11]),
]
# Use callback to log the test labels in each fold
def cb(cbackenv):
print([fold.dtest.get_label() for fold in cbackenv.cvfolds])
# Run cross validation and capture standard out to test callback result
with tm.captured_output() as (out, err):
xgb.cv(
params, dm, num_boost_round=1, folds=folds, callbacks=[cb],
as_pandas=False
)
output = out.getvalue().strip()
solution = ('[array([5., 8.], dtype=float32), array([23., 43., 11.],' +
' dtype=float32)]')
assert output == solution
class TestBasicPathLike:
"""Unit tests using pathlib.Path for file interaction."""
def test_DMatrix_init_from_path(self):
"""Initialization from the data path."""
dpath = Path('demo/data')
dtrain = xgb.DMatrix(dpath / 'agaricus.txt.train')
assert dtrain.num_row() == 6513
assert dtrain.num_col() == 127
def test_DMatrix_save_to_path(self):
"""Saving to a binary file using pathlib from a DMatrix."""
data = np.random.randn(100, 2)
target = np.array([0, 1] * 50)
features = ['Feature1', 'Feature2']
dm = xgb.DMatrix(data, label=target, feature_names=features)
# save, assert exists, remove file
binary_path = Path("dtrain.bin")
dm.save_binary(binary_path)
assert binary_path.exists()
Path.unlink(binary_path)
def test_Booster_init_invalid_path(self):
"""An invalid model_file path should raise XGBoostError."""
with pytest.raises(xgb.core.XGBoostError):
xgb.Booster(model_file=Path("invalidpath"))
def test_Booster_save_and_load(self):
"""Saving and loading model files from paths."""
save_path = Path("saveload.model")
data = np.random.randn(100, 2)
target = np.array([0, 1] * 50)
features = ['Feature1', 'Feature2']
dm = xgb.DMatrix(data, label=target, feature_names=features)
params = {'objective': 'binary:logistic',
'eval_metric': 'logloss',
'eta': 0.3,
'max_depth': 1}
bst = xgb.train(params, dm, num_boost_round=1)
# save, assert exists
bst.save_model(save_path)
assert save_path.exists()
def dump_assertions(dump):
"""Assertions for the expected dump from Booster"""
assert len(dump) == 1, 'Expected only 1 tree to be dumped.'
assert len(dump[0].splitlines()) == 3, 'Expected 1 root and 2 leaves - 3 lines.'
# load the model again using Path
bst2 = xgb.Booster(model_file=save_path)
dump2 = bst2.get_dump()
dump_assertions(dump2)
# load again using load_model
bst3 = xgb.Booster()
bst3.load_model(save_path)
dump3 = bst3.get_dump()
dump_assertions(dump3)
# remove file
Path.unlink(save_path)
| spark-xgboost-nv-release_1.4.0 | tests/python/test_basic.py |
import xgboost as xgb
import testing as tm
import numpy as np
import pytest
rng = np.random.RandomState(1994)
class TestEarlyStopping:
@pytest.mark.skipif(**tm.no_sklearn())
def test_early_stopping_nonparallel(self):
from sklearn.datasets import load_digits
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
digits = load_digits(n_class=2)
X = digits['data']
y = digits['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
clf1 = xgb.XGBClassifier(learning_rate=0.1)
clf1.fit(X_train, y_train, early_stopping_rounds=5, eval_metric="auc",
eval_set=[(X_test, y_test)])
clf2 = xgb.XGBClassifier(learning_rate=0.1)
clf2.fit(X_train, y_train, early_stopping_rounds=4, eval_metric="auc",
eval_set=[(X_test, y_test)])
# should be the same
assert clf1.best_score == clf2.best_score
assert clf1.best_score != 1
# check overfit
clf3 = xgb.XGBClassifier(learning_rate=0.1)
clf3.fit(X_train, y_train, early_stopping_rounds=10, eval_metric="auc",
eval_set=[(X_test, y_test)])
assert clf3.best_score == 1
def evalerror(self, preds, dtrain):
from sklearn.metrics import mean_squared_error
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds))
return 'rmse', np.sqrt(mean_squared_error(labels, preds))
@staticmethod
def assert_metrics_length(cv, expected_length):
for key, value in cv.items():
assert len(value) == expected_length
@pytest.mark.skipif(**tm.no_sklearn())
def test_cv_early_stopping(self):
from sklearn.datasets import load_digits
digits = load_digits(n_class=2)
X = digits['data']
y = digits['target']
dm = xgb.DMatrix(X, label=y)
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': 'error'}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=10)
self.assert_metrics_length(cv, 10)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=5)
self.assert_metrics_length(cv, 3)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=1)
self.assert_metrics_length(cv, 1)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, early_stopping_rounds=10)
self.assert_metrics_length(cv, 10)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, early_stopping_rounds=1)
self.assert_metrics_length(cv, 5)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, maximize=True,
early_stopping_rounds=1)
self.assert_metrics_length(cv, 1)
@pytest.mark.skipif(**tm.no_sklearn())
@pytest.mark.skipif(**tm.no_pandas())
def test_cv_early_stopping_with_multiple_eval_sets_and_metrics(self):
from sklearn.datasets import load_breast_cancer
X, y = load_breast_cancer(return_X_y=True)
dm = xgb.DMatrix(X, label=y)
params = {'objective':'binary:logistic'}
metrics = [['auc'], ['error'], ['logloss'],
['logloss', 'auc'], ['logloss', 'error'], ['error', 'logloss']]
num_iteration_history = []
# If more than one metric is given, early stopping should use the last metric
for i, m in enumerate(metrics):
result = xgb.cv(params, dm, num_boost_round=1000, nfold=5, stratified=True,
metrics=m, early_stopping_rounds=20, seed=42)
num_iteration_history.append(len(result))
df = result['test-{}-mean'.format(m[-1])]
# When early stopping is invoked, the last metric should be as best it can be.
if m[-1] == 'auc':
assert np.all(df <= df.iloc[-1])
else:
assert np.all(df >= df.iloc[-1])
assert num_iteration_history[:3] == num_iteration_history[3:]
| spark-xgboost-nv-release_1.4.0 | tests/python/test_early_stopping.py |
# -*- coding: utf-8 -*-
import numpy as np
import xgboost as xgb
import testing as tm
import pytest
try:
import matplotlib
matplotlib.use('Agg')
from matplotlib.axes import Axes
from graphviz import Source
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(),
tm.no_graphviz()))
dpath = 'demo/data/agaricus.txt.train'
class TestPlotting:
def test_plotting(self):
m = xgb.DMatrix(dpath)
booster = xgb.train({'max_depth': 2, 'eta': 1,
'objective': 'binary:logistic'}, m,
num_boost_round=2)
ax = xgb.plot_importance(booster)
assert isinstance(ax, Axes)
assert ax.get_title() == 'Feature importance'
assert ax.get_xlabel() == 'F score'
assert ax.get_ylabel() == 'Features'
assert len(ax.patches) == 4
ax = xgb.plot_importance(booster, color='r',
title='t', xlabel='x', ylabel='y')
assert isinstance(ax, Axes)
assert ax.get_title() == 't'
assert ax.get_xlabel() == 'x'
assert ax.get_ylabel() == 'y'
assert len(ax.patches) == 4
for p in ax.patches:
assert p.get_facecolor() == (1.0, 0, 0, 1.0) # red
ax = xgb.plot_importance(booster, color=['r', 'r', 'b', 'b'],
title=None, xlabel=None, ylabel=None)
assert isinstance(ax, Axes)
assert ax.get_title() == ''
assert ax.get_xlabel() == ''
assert ax.get_ylabel() == ''
assert len(ax.patches) == 4
assert ax.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[1].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[2].get_facecolor() == (0, 0, 1.0, 1.0) # blue
assert ax.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # blue
g = xgb.to_graphviz(booster, num_trees=0)
assert isinstance(g, Source)
ax = xgb.plot_tree(booster, num_trees=0)
assert isinstance(ax, Axes)
def test_importance_plot_lim(self):
np.random.seed(1)
dm = xgb.DMatrix(np.random.randn(100, 100), label=[0, 1] * 50)
bst = xgb.train({}, dm)
assert len(bst.get_fscore()) == 71
ax = xgb.plot_importance(bst)
assert ax.get_xlim() == (0., 11.)
assert ax.get_ylim() == (-1., 71.)
ax = xgb.plot_importance(bst, xlim=(0, 5), ylim=(10, 71))
assert ax.get_xlim() == (0., 5.)
assert ax.get_ylim() == (10., 71.)
| spark-xgboost-nv-release_1.4.0 | tests/python/test_plotting.py |
import xgboost
import os
import generate_models as gm
import testing as tm
import json
import zipfile
import pytest
import copy
import urllib.request
def run_model_param_check(config):
assert config['learner']['learner_model_param']['num_feature'] == str(4)
assert config['learner']['learner_train_param']['booster'] == 'gbtree'
def run_booster_check(booster, name):
config = json.loads(booster.save_config())
run_model_param_check(config)
if name.find('cls') != -1:
assert (len(booster.get_dump()) == gm.kForests * gm.kRounds *
gm.kClasses)
assert float(
config['learner']['learner_model_param']['base_score']) == 0.5
assert config['learner']['learner_train_param'][
'objective'] == 'multi:softmax'
elif name.find('logitraw') != -1:
assert len(booster.get_dump()) == gm.kForests * gm.kRounds
assert config['learner']['learner_model_param']['num_class'] == str(0)
assert config['learner']['learner_train_param']['objective'] == 'binary:logitraw'
elif name.find('logit') != -1:
assert len(booster.get_dump()) == gm.kForests * gm.kRounds
assert config['learner']['learner_model_param']['num_class'] == str(0)
assert config['learner']['learner_train_param'][
'objective'] == 'binary:logistic'
elif name.find('ltr') != -1:
assert config['learner']['learner_train_param'][
'objective'] == 'rank:ndcg'
else:
assert name.find('reg') != -1
assert len(booster.get_dump()) == gm.kForests * gm.kRounds
assert float(
config['learner']['learner_model_param']['base_score']) == 0.5
assert config['learner']['learner_train_param'][
'objective'] == 'reg:squarederror'
def run_scikit_model_check(name, path):
if name.find('reg') != -1:
reg = xgboost.XGBRegressor()
reg.load_model(path)
config = json.loads(reg.get_booster().save_config())
if name.find('0.90') != -1:
assert config['learner']['learner_train_param'][
'objective'] == 'reg:linear'
else:
assert config['learner']['learner_train_param'][
'objective'] == 'reg:squarederror'
assert (len(reg.get_booster().get_dump()) ==
gm.kRounds * gm.kForests)
run_model_param_check(config)
elif name.find('cls') != -1:
cls = xgboost.XGBClassifier()
cls.load_model(path)
if name.find('0.90') == -1:
assert len(cls.classes_) == gm.kClasses
assert len(cls._le.classes_) == gm.kClasses
assert cls.n_classes_ == gm.kClasses
assert (len(cls.get_booster().get_dump()) ==
gm.kRounds * gm.kForests * gm.kClasses), path
config = json.loads(cls.get_booster().save_config())
assert config['learner']['learner_train_param'][
'objective'] == 'multi:softprob', path
run_model_param_check(config)
elif name.find('ltr') != -1:
ltr = xgboost.XGBRanker()
ltr.load_model(path)
assert (len(ltr.get_booster().get_dump()) ==
gm.kRounds * gm.kForests)
config = json.loads(ltr.get_booster().save_config())
assert config['learner']['learner_train_param'][
'objective'] == 'rank:ndcg'
run_model_param_check(config)
elif name.find('logitraw') != -1:
logit = xgboost.XGBClassifier()
logit.load_model(path)
assert (len(logit.get_booster().get_dump()) ==
gm.kRounds * gm.kForests)
config = json.loads(logit.get_booster().save_config())
assert config['learner']['learner_train_param']['objective'] == 'binary:logitraw'
elif name.find('logit') != -1:
logit = xgboost.XGBClassifier()
logit.load_model(path)
assert (len(logit.get_booster().get_dump()) ==
gm.kRounds * gm.kForests)
config = json.loads(logit.get_booster().save_config())
assert config['learner']['learner_train_param'][
'objective'] == 'binary:logistic'
else:
assert False
@pytest.mark.skipif(**tm.no_sklearn())
def test_model_compatibility():
'''Test model compatibility; it can only be run on CI, as others don't
have the credentials.
'''
path = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(path, 'models')
zip_path, _ = urllib.request.urlretrieve('https://xgboost-ci-jenkins-artifacts.s3-us-west-2' +
'.amazonaws.com/xgboost_model_compatibility_test.zip')
with zipfile.ZipFile(zip_path, 'r') as z:
z.extractall(path)
models = [
os.path.join(root, f) for root, subdir, files in os.walk(path)
for f in files
if f != 'version'
]
assert models
for path in models:
name = os.path.basename(path)
if name.startswith('xgboost-'):
booster = xgboost.Booster(model_file=path)
run_booster_check(booster, name)
# Do full serialization.
booster = copy.copy(booster)
run_booster_check(booster, name)
elif name.startswith('xgboost_scikit'):
run_scikit_model_check(name, path)
else:
assert False
| spark-xgboost-nv-release_1.4.0 | tests/python/test_model_compatibility.py |
# -*- coding: utf-8 -*-
import numpy as np
import xgboost
import testing as tm
import pytest
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestInteractionConstraints:
def run_interaction_constraints(self, tree_method):
x1 = np.random.normal(loc=1.0, scale=1.0, size=1000)
x2 = np.random.normal(loc=1.0, scale=1.0, size=1000)
x3 = np.random.choice([1, 2, 3], size=1000, replace=True)
y = x1 + x2 + x3 + x1 * x2 * x3 \
+ np.random.normal(
loc=0.001, scale=1.0, size=1000) + 3 * np.sin(x1)
X = np.column_stack((x1, x2, x3))
dtrain = xgboost.DMatrix(X, label=y)
params = {
'max_depth': 3,
'eta': 0.1,
'nthread': 2,
'interaction_constraints': '[[0, 1]]',
'tree_method': tree_method
}
num_boost_round = 12
# Fit a model that only allows interaction between x1 and x2
bst = xgboost.train(
params, dtrain, num_boost_round, evals=[(dtrain, 'train')])
# Set all observations to have the same x3 values then increment
# by the same amount
def f(x):
tmat = xgboost.DMatrix(
np.column_stack((x1, x2, np.repeat(x, 1000))))
return bst.predict(tmat)
preds = [f(x) for x in [1, 2, 3]]
# Check incrementing x3 has the same effect on all observations
# since x3 is constrained to be independent of x1 and x2
# and all observations start off from the same x3 value
diff1 = preds[1] - preds[0]
assert np.all(np.abs(diff1 - diff1[0]) < 1e-4)
diff2 = preds[2] - preds[1]
assert np.all(np.abs(diff2 - diff2[0]) < 1e-4)
def test_exact_interaction_constraints(self):
self.run_interaction_constraints(tree_method='exact')
def test_hist_interaction_constraints(self):
self.run_interaction_constraints(tree_method='hist')
def test_approx_interaction_constraints(self):
self.run_interaction_constraints(tree_method='approx')
@pytest.mark.skipif(**tm.no_sklearn())
def training_accuracy(self, tree_method):
from sklearn.metrics import accuracy_score
dtrain = xgboost.DMatrix(dpath + 'agaricus.txt.train?indexing_mode=1')
dtest = xgboost.DMatrix(dpath + 'agaricus.txt.test?indexing_mode=1')
params = {
'eta': 1,
'max_depth': 6,
'objective': 'binary:logistic',
'tree_method': tree_method,
'interaction_constraints': '[[1,2], [2,3,4]]'
}
num_boost_round = 5
params['grow_policy'] = 'lossguide'
bst = xgboost.train(params, dtrain, num_boost_round)
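# The predicted class is deliberately inverted (probability < 0.5), so an
# accuracy below 0.1 on the inverted labels means the booster classifies
# more than 90% of dtest correctly.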
pred_dtest = (bst.predict(dtest) < 0.5)
assert accuracy_score(dtest.get_label(), pred_dtest) < 0.1
params['grow_policy'] = 'depthwise'
bst = xgboost.train(params, dtrain, num_boost_round)
pred_dtest = (bst.predict(dtest) < 0.5)
assert accuracy_score(dtest.get_label(), pred_dtest) < 0.1
def test_hist_training_accuracy(self):
self.training_accuracy(tree_method='hist')
def test_exact_training_accuracy(self):
self.training_accuracy(tree_method='exact')
def test_approx_training_accuracy(self):
self.training_accuracy(tree_method='approx')
| spark-xgboost-nv-release_1.4.0 | tests/python/test_interaction_constraints.py |
import numpy as np
import xgboost as xgb
from numpy.testing import assert_approx_equal
train_data = xgb.DMatrix(np.array([[1]]), label=np.array([1]))
class TestTreeRegularization:
def test_alpha(self):
params = {
'tree_method': 'exact', 'verbosity': 0,
'objective': 'reg:squarederror',
'eta': 1,
'lambda': 0,
'alpha': 0.1
}
model = xgb.train(params, train_data, 1)
preds = model.predict(train_data)
# Default prediction (with no trees) is 0.5
# sum_grad = (0.5 - 1.0)
# sum_hess = 1.0
# 0.9 = 0.5 - (sum_grad - alpha * sgn(sum_grad)) / sum_hess
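# Worked out: sum_grad = 0.5 - 1.0 = -0.5 is shrunk by the L1 term to
# -0.5 + 0.1 = -0.4, giving a leaf weight of 0.4; with eta = 1 the
# prediction is 0.5 + 0.4 = 0.9.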
assert_approx_equal(preds[0], 0.9)
def test_lambda(self):
params = {
'tree_method': 'exact', 'verbosity': 0,
'objective': 'reg:squarederror',
'eta': 1,
'lambda': 1,
'alpha': 0
}
model = xgb.train(params, train_data, 1)
preds = model.predict(train_data)
# Default prediction (with no trees) is 0.5
# sum_grad = (0.5 - 1.0)
# sum_hess = 1.0
# 0.75 = 0.5 - sum_grad / (sum_hess + lambda)
assert_approx_equal(preds[0], 0.75)
def test_alpha_and_lambda(self):
params = {
'tree_method': 'exact', 'verbosity': 1,
'objective': 'reg:squarederror',
'eta': 1,
'lambda': 1,
'alpha': 0.1
}
model = xgb.train(params, train_data, 1)
preds = model.predict(train_data)
# Default prediction (with no trees) is 0.5
# sum_grad = (0.5 - 1.0)
# sum_hess = 1.0
# 0.7 = 0.5 - (sum_grad - alpha * sgn(sum_grad)) / (sum_hess + lambda)
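# Worked out: the L1 term shrinks sum_grad to -0.4 and lambda raises the
# denominator to sum_hess + lambda = 2.0, giving a leaf weight of 0.2 and
# a prediction of 0.5 + 0.2 = 0.7.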
assert_approx_equal(preds[0], 0.7)
| spark-xgboost-nv-release_1.4.0 | tests/python/test_tree_regularization.py |
# -*- coding: utf-8 -*-
import numpy as np
import xgboost as xgb
import itertools
import re
import scipy
import scipy.special
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestSHAP:
def test_feature_importances(self):
data = np.random.randn(100, 5)
target = np.array([0, 1] * 50)
features = ['Feature1', 'Feature2', 'Feature3', 'Feature4', 'Feature5']
dm = xgb.DMatrix(data, label=target,
feature_names=features)
params = {'objective': 'multi:softprob',
'eval_metric': 'mlogloss',
'eta': 0.3,
'num_class': 3}
bst = xgb.train(params, dm, num_boost_round=10)
# number of feature importances should == number of features
scores1 = bst.get_score()
scores2 = bst.get_score(importance_type='weight')
scores3 = bst.get_score(importance_type='cover')
scores4 = bst.get_score(importance_type='gain')
scores5 = bst.get_score(importance_type='total_cover')
scores6 = bst.get_score(importance_type='total_gain')
assert len(scores1) == len(features)
assert len(scores2) == len(features)
assert len(scores3) == len(features)
assert len(scores4) == len(features)
assert len(scores5) == len(features)
assert len(scores6) == len(features)
# check backwards compatibility of get_fscore
fscores = bst.get_fscore()
assert scores1 == fscores
dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
def fn(max_depth, num_rounds):
# train
params = {'max_depth': max_depth, 'eta': 1, 'verbosity': 0}
bst = xgb.train(params, dtrain, num_boost_round=num_rounds)
# predict
preds = bst.predict(dtest)
contribs = bst.predict(dtest, pred_contribs=True)
# result should be (number of features + BIAS) * number of rows
assert contribs.shape == (dtest.num_row(), dtest.num_col() + 1)
# sum of contributions should be same as predictions
np.testing.assert_array_almost_equal(np.sum(contribs, axis=1), preds)
# for max_depth, num_rounds in itertools.product(range(0, 3), range(1, 5)):
# yield fn, max_depth, num_rounds
# check that we get the right SHAP values for a basic AND example
# (https://arxiv.org/abs/1706.06060)
X = np.zeros((4, 2))
X[0, :] = 1
X[1, 0] = 1
X[2, 1] = 1
y = np.zeros(4)
y[0] = 1
param = {"max_depth": 2, "base_score": 0.0, "eta": 1.0, "lambda": 0}
bst = xgb.train(param, xgb.DMatrix(X, label=y), 1)
out = bst.predict(xgb.DMatrix(X[0:1, :]), pred_contribs=True)
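# Sanity check of the expected numbers: the tree reproduces the labels
# exactly, so the bias term out[0, 2] is the average output over the four
# training rows, 1/4 = 0.25; by symmetry the remaining 0.75 is split
# evenly between the two features, 0.375 each.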
assert out[0, 0] == 0.375
assert out[0, 1] == 0.375
assert out[0, 2] == 0.25
def parse_model(model):
trees = []
r_exp = r"([0-9]+):\[f([0-9]+)<([0-9\.e-]+)\] yes=([0-9]+),no=([0-9]+).*cover=([0-9e\.]+)"
r_exp_leaf = r"([0-9]+):leaf=([0-9\.e-]+),cover=([0-9e\.]+)"
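# r_exp captures internal-node dump lines (node id, split feature,
# threshold, yes/no child ids and cover); r_exp_leaf captures leaf lines
# (node id, leaf value and cover).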
for tree in model.get_dump(with_stats=True):
lines = list(tree.splitlines())
trees.append([None for i in range(len(lines))])
for line in lines:
match = re.search(r_exp, line)
if match is not None:
ind = int(match.group(1))
while ind >= len(trees[-1]):
trees[-1].append(None)
trees[-1][ind] = {
"yes_ind": int(match.group(4)),
"no_ind": int(match.group(5)),
"value": None,
"threshold": float(match.group(3)),
"feature_index": int(match.group(2)),
"cover": float(match.group(6))
}
else:
match = re.search(r_exp_leaf, line)
ind = int(match.group(1))
while ind >= len(trees[-1]):
trees[-1].append(None)
trees[-1][ind] = {
"value": float(match.group(2)),
"cover": float(match.group(3))
}
return trees
def exp_value_rec(tree, z, x, i=0):
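# Expected tree output for sample x given a coalition z: features with
# z[ind] == 1 are treated as present and follow the split decided by x,
# absent features average both children weighted by their cover.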
if tree[i]["value"] is not None:
return tree[i]["value"]
else:
ind = tree[i]["feature_index"]
if z[ind] == 1:
if x[ind] < tree[i]["threshold"]:
return exp_value_rec(tree, z, x, tree[i]["yes_ind"])
else:
return exp_value_rec(tree, z, x, tree[i]["no_ind"])
else:
r_yes = tree[tree[i]["yes_ind"]]["cover"] / tree[i]["cover"]
out = exp_value_rec(tree, z, x, tree[i]["yes_ind"])
val = out * r_yes
r_no = tree[tree[i]["no_ind"]]["cover"] / tree[i]["cover"]
out = exp_value_rec(tree, z, x, tree[i]["no_ind"])
val += out * r_no
return val
def exp_value(trees, z, x):
return np.sum([exp_value_rec(tree, z, x) for tree in trees])
def all_subsets(ss):
return itertools.chain(*map(lambda x: itertools.combinations(ss, x), range(0, len(ss) + 1)))
def shap_value(trees, x, i, cond=None, cond_value=None):
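# Brute-force Shapley value for feature i: for every subset S of the other
# features, the marginal contribution f(S + {i}) - f(S) is weighted by
# |S|! * (M - |S| - 1)! / M!, which equals the
# 1 / (M * C(M - 1, |S|)) factor used below.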
M = len(x)
z = np.zeros(M)
other_inds = list(set(range(M)) - set([i]))
if cond is not None:
other_inds = list(set(other_inds) - set([cond]))
z[cond] = cond_value
M -= 1
total = 0.0
for subset in all_subsets(other_inds):
if len(subset) > 0:
z[list(subset)] = 1
v1 = exp_value(trees, z, x)
z[i] = 1
v2 = exp_value(trees, z, x)
total += (v2 - v1) / (scipy.special.binom(M - 1, len(subset)) * M)
z[i] = 0
z[list(subset)] = 0
return total
def shap_values(trees, x):
vals = [shap_value(trees, x, i) for i in range(len(x))]
vals.append(exp_value(trees, np.zeros(len(x)), x))
return np.array(vals)
def interaction_values(trees, x):
M = len(x)
out = np.zeros((M + 1, M + 1))
for i in range(len(x)):
for j in range(len(x)):
if i != j:
out[i, j] = interaction_value(trees, x, i, j) / 2
svals = shap_values(trees, x)
main_effects = svals - out.sum(1)
out[np.diag_indices_from(out)] = main_effects
return out
def interaction_value(trees, x, i, j):
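# Brute-force Shapley interaction index for the pair (i, j): the mixed
# difference f(S + {i, j}) - f(S + {i}) - f(S + {j}) + f(S) is averaged
# over subsets S with weight 1 / ((M - 1) * C(M - 2, |S|)).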
M = len(x)
z = np.zeros(M)
other_inds = list(set(range(M)) - set([i, j]))
total = 0.0
for subset in all_subsets(other_inds):
if len(subset) > 0:
z[list(subset)] = 1
v00 = exp_value(trees, z, x)
z[i] = 1
v10 = exp_value(trees, z, x)
z[j] = 1
v11 = exp_value(trees, z, x)
z[i] = 0
v01 = exp_value(trees, z, x)
z[j] = 0
total += (v11 - v01 - v10 + v00) / (scipy.special.binom(M - 2, len(subset)) * (M - 1))
z[list(subset)] = 0
return total
# test a simple AND function
M = 2
N = 4
X = np.zeros((N, M))
X[0, :] = 1
X[1, 0] = 1
X[2, 1] = 1
y = np.zeros(N)
y[0] = 1
param = {"max_depth": 2, "base_score": 0.0, "eta": 1.0, "lambda": 0}
bst = xgb.train(param, xgb.DMatrix(X, label=y), 1)
brute_force = shap_values(parse_model(bst), X[0, :])
fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_contribs=True)
assert np.linalg.norm(brute_force - fast_method[0, :]) < 1e-4
brute_force = interaction_values(parse_model(bst), X[0, :])
fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_interactions=True)
assert np.linalg.norm(brute_force - fast_method[0, :, :]) < 1e-4
# test a random function
np.random.seed(0)
M = 2
N = 4
X = np.random.randn(N, M)
y = np.random.randn(N)
param = {"max_depth": 2, "base_score": 0.0, "eta": 1.0, "lambda": 0}
bst = xgb.train(param, xgb.DMatrix(X, label=y), 1)
brute_force = shap_values(parse_model(bst), X[0, :])
fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_contribs=True)
assert np.linalg.norm(brute_force - fast_method[0, :]) < 1e-4
brute_force = interaction_values(parse_model(bst), X[0, :])
fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_interactions=True)
assert np.linalg.norm(brute_force - fast_method[0, :, :]) < 1e-4
# test another larger more complex random function
np.random.seed(0)
M = 5
N = 100
X = np.random.randn(N, M)
y = np.random.randn(N)
base_score = 1.0
param = {"max_depth": 5, "base_score": base_score, "eta": 0.1, "gamma": 2.0}
bst = xgb.train(param, xgb.DMatrix(X, label=y), 10)
brute_force = shap_values(parse_model(bst), X[0, :])
brute_force[-1] += base_score
fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_contribs=True)
assert np.linalg.norm(brute_force - fast_method[0, :]) < 1e-4
brute_force = interaction_values(parse_model(bst), X[0, :])
brute_force[-1, -1] += base_score
fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_interactions=True)
assert np.linalg.norm(brute_force - fast_method[0, :, :]) < 1e-4
| spark-xgboost-nv-release_1.4.0 | tests/python/test_shap.py |